diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml
new file mode 100644
index 00000000..e0790508
--- /dev/null
+++ b/.github/actions/run-ee-server/action.yml
@@ -0,0 +1,77 @@
+name: 'Run EE Server'
+description: 'Run an Aerospike EE server and return once it is ready. Only tested on Linux and macOS.'
+# NOTE: do not share this server container with others,
+# since it uses the default admin / admin credentials
+inputs:
+ # All inputs in composite actions are strings
+ use-server-rc:
+ required: true
+ description: Deploy server release candidate?
+ default: 'false'
+ server-tag:
+ required: true
+ description: Specify Docker tag
+ default: 'latest'
+ # GitHub composite actions can't access secrets,
+ # so we need to pass them in as inputs
+ docker-hub-username:
+ description: Required for using release candidates
+ required: false
+ docker-hub-password:
+ description: Required for using release candidates
+ required: false
+
+runs:
+ using: "composite"
+ steps:
+ - name: Log into Docker Hub to get server RC
+ if: ${{ inputs.use-server-rc == 'true' }}
+ run: docker login --username ${{ inputs.docker-hub-username }} --password ${{ inputs.docker-hub-password }}
+ shell: bash
+
+ - run: echo IMAGE_NAME=aerospike/aerospike-server-enterprise${{ inputs.use-server-rc == 'true' && '-rc' || '' }}:${{ inputs.server-tag }} >> $GITHUB_ENV
+ shell: bash
+
+ - run: echo NEW_IMAGE_NAME=${{ env.IMAGE_NAME }}-security-and-sc >> $GITHUB_ENV
+ shell: bash
+
+ # macOS GitHub runners and Windows self-hosted runners don't have buildx installed by default
+ - if: ${{ runner.os == 'Windows' || runner.os == 'macOS' }}
+ uses: docker/setup-buildx-action@v3
+
+ - name: Build and push
+ uses: docker/build-push-action@v6
+ with:
+ # Don't use the default Git context, or else it will clone the whole client repo again
+ context: .github/workflows/docker-build-context
+ build-args: |
+ server_image=${{ env.IMAGE_NAME }}
+ tags: ${{ env.NEW_IMAGE_NAME }}
+ # setup-buildx-action configures Docker to use the docker-container build driver
+ # This driver doesn't load the built image into the local image store by default,
+ # so we have to enable that manually
+ load: true
+
+ - run: echo SERVER_CONTAINER_NAME="aerospike" >> $GITHUB_ENV
+ shell: bash
+
+ - run: docker run -d --name ${{ env.SERVER_CONTAINER_NAME }} -e DEFAULT_TTL=2592000 -p 3000:3000 ${{ env.NEW_IMAGE_NAME }}
+ shell: bash
+
+ - uses: ./.github/actions/wait-for-as-server-to-start
+ with:
+ container-name: ${{ env.SERVER_CONTAINER_NAME }}
+ is-security-enabled: false
+ is-strong-consistency-enabled: true
+
+ # All the partitions are assumed to be dead when reusing a roster file
+ - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm --enable --execute "manage revive ns test"
+ shell: bash
+
+ # Apply changes
+ - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm --enable --execute "manage recluster"
+ shell: bash
+
+ # Print the server logs for debugging
+ - run: docker logs ${{ env.SERVER_CONTAINER_NAME }}
+ shell: bash
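+
+ # Optional sanity check (a hedged sketch, not enabled here): confirm the roster
+ # was applied. "roster:namespace=<ns>" is the info command for inspecting a
+ # strong-consistency roster.
+ # - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asinfo -v roster:namespace=test
+ #   shell: bash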
diff --git a/.github/actions/wait-for-as-server-to-start/action.yml b/.github/actions/wait-for-as-server-to-start/action.yml
new file mode 100644
index 00000000..373c2697
--- /dev/null
+++ b/.github/actions/wait-for-as-server-to-start/action.yml
@@ -0,0 +1,28 @@
+name: 'Wait for Aerospike server to start'
+description: Only tested on Linux and macOS
+inputs:
+ container-name:
+ required: true
+ is-security-enabled:
+ required: false
+ default: 'false'
+ is-strong-consistency-enabled:
+ required: false
+ default: 'false'
+
+runs:
+ using: "composite"
+ steps:
+ - name: 'macOS: install timeout command'
+ if: ${{ runner.os == 'macOS' }}
+ run: brew install coreutils
+ shell: bash
+
+ # Composite actions don't support step-level timeout-minutes,
+ # so use the timeout command, and keep the polling logic in its own file to make it easier to read
+ # Call bash explicitly, since timeout runs the command directly rather than through a shell
+ # Also, we don't want to fail on timeout, in case the server *did* finish starting up but the script couldn't detect it due to a bug
+ # Effectively, this composite action is like a "sleep" that is optimized to exit early once it detects an ok from the server
+ - name: Wait for EE server to start
+ run: timeout 30 bash ./.github/workflows/wait-for-as-server-to-start.bash ${{ inputs.container-name }} ${{ inputs.is-security-enabled }} ${{ inputs.is-strong-consistency-enabled }} || true
+ shell: bash
diff --git a/.github/workflows/build-artifacts.yml b/.github/workflows/build-artifacts.yml
new file mode 100644
index 00000000..5ce0674d
--- /dev/null
+++ b/.github/workflows/build-artifacts.yml
@@ -0,0 +1,70 @@
+name: Build artifacts
+run-name: Build artifacts (run_tests=${{ inputs.run_tests }}, use-server-rc=${{ inputs.use-server-rc }}, server-tag=${{ inputs.server-tag }})
+
+on:
+ workflow_dispatch:
+ inputs:
+ run_tests:
+ description: "Run integration tests?"
+ required: true
+ type: boolean
+ default: false
+ use-server-rc:
+ type: boolean
+ required: true
+ default: false
+ description: 'Test against a server release candidate? (e.g. to test new server features)'
+ server-tag:
+ type: string
+ required: true
+ default: 'latest'
+ description: 'Server Docker image tag (e.g. to test a client backport version)'
+
+ workflow_call:
+ inputs:
+ # The "dev" tests test the artifacts against a server
+ run_tests:
+ required: false
+ type: boolean
+ default: false
+ # workflow_call hack: callers never set this input, so its default of true
+ # only takes effect on workflow_call events (workflow_dispatch doesn't define it)
+ is_workflow_call:
+ type: boolean
+ default: true
+ required: false
+ # This input is only used in workflow_call events
+ sha-to-build-and-test:
+ description: The commit SHA to build and test; a calling workflow may want to use a different ref than its own
+ type: string
+ # Make it required to keep things simple
+ required: true
+ # A calling workflow doesn't actually set values for the inputs below,
+ # but it still needs default values to be defined for them
+ use-server-rc:
+ required: false
+ default: false
+ type: boolean
+ server-tag:
+ type: string
+ required: false
+ default: 'latest'
+ secrets:
+ DOCKER_HUB_BOT_USERNAME:
+ required: true
+ DOCKER_HUB_BOT_PW:
+ required: true
+ MAC_M1_SELF_HOSTED_RUNNER_PW:
+ required: true
+
+jobs:
+ dotnet:
+ strategy:
+ fail-fast: false
+ uses: ./.github/workflows/dotnet.yml
+ with:
+ # Can't use the env context in job-level "with" values, so compute this inline
+ sha-to-build-and-test: ${{ inputs.is_workflow_call == true && inputs.sha-to-build-and-test || github.sha }}
+ run_tests: ${{ inputs.run_tests }}
+ use-server-rc: ${{ inputs.use-server-rc }}
+ server-tag: ${{ inputs.server-tag }}
+ secrets: inherit
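+
+# Example caller (a hedged sketch; the job name and input values are illustrative):
+#
+# jobs:
+#   build:
+#     uses: ./.github/workflows/build-artifacts.yml
+#     with:
+#       run_tests: true
+#       sha-to-build-and-test: ${{ github.sha }}
+#     secrets: inherit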
diff --git a/.github/workflows/docker-build-context/Dockerfile b/.github/workflows/docker-build-context/Dockerfile
new file mode 100644
index 00000000..5f20cb51
--- /dev/null
+++ b/.github/workflows/docker-build-context/Dockerfile
@@ -0,0 +1,39 @@
+ARG server_image=aerospike/aerospike-server-enterprise
+ARG ROSTER_FILE_NAME=roster.smd
+# Temp file for passing the node id from one build stage to another:
+# Docker doesn't support command substitution when setting ARG values, so we pass the value through a file instead
+ARG NODE_ID_FILE_NAME=node_id
+
+FROM $server_image as configure-server
+
+WORKDIR /opt/aerospike/smd
+
+# Enable authentication
+
+ARG AEROSPIKE_CONF_TEMPLATE_PATH=/etc/aerospike/aerospike.template.conf
+
+# Enable strong consistency
+RUN sed -i "s/\(namespace.*{\)/\1\n\tstrong-consistency true/" $AEROSPIKE_CONF_TEMPLATE_PATH
+RUN sed -i "s/\(namespace.*{\)/\1\n\tstrong-consistency-allow-expunge true/" $AEROSPIKE_CONF_TEMPLATE_PATH
+ARG ROSTER_FILE_NAME
+COPY $ROSTER_FILE_NAME .
+
+# Fetch node id from roster.smd
+
+# jq doesn't publish a tag for the latest major version, which would protect against breaking changes
+# Pinning a specific version is the next best thing
+FROM ghcr.io/jqlang/jq:1.7 as get-jq
+# The jq Docker image doesn't have a shell,
+# but we need one to fetch and pass the node id to the next build stage
+FROM busybox as get-node-id
+COPY --from=get-jq /jq /bin/
+ARG ROSTER_FILE_NAME
+COPY $ROSTER_FILE_NAME .
+ARG NODE_ID_FILE_NAME
+RUN jq --raw-output '.[1].value' $ROSTER_FILE_NAME > $NODE_ID_FILE_NAME
+
+FROM configure-server as set-node-id
+ARG NODE_ID_FILE_NAME
+COPY --from=get-node-id $NODE_ID_FILE_NAME .
+RUN sed -i "s/\(^service {\)/\1\n\tnode-id $(cat $NODE_ID_FILE_NAME)/" $AEROSPIKE_CONF_TEMPLATE_PATH
+RUN rm $NODE_ID_FILE_NAME
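+
+# Sanity check (a hedged sketch): the same extraction can be run locally.
+# With this repo's roster.smd, ".[1].value" resolves to "a1", so the service
+# stanza ends up with "node-id a1":
+#   jq --raw-output '.[1].value' .github/workflows/docker-build-context/roster.smd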
diff --git a/.github/workflows/docker-build-context/roster.smd b/.github/workflows/docker-build-context/roster.smd
new file mode 100644
index 00000000..66daed5f
--- /dev/null
+++ b/.github/workflows/docker-build-context/roster.smd
@@ -0,0 +1,12 @@
+[
+ [
+ 97107025374203,
+ 1
+ ],
+ {
+ "key": "test",
+ "value": "a1",
+ "generation": 1,
+ "timestamp": 465602976982
+ }
+]
diff --git a/.github/workflows/docker-build-context/security.smd b/.github/workflows/docker-build-context/security.smd
new file mode 100644
index 00000000..9c530d51
--- /dev/null
+++ b/.github/workflows/docker-build-context/security.smd
@@ -0,0 +1,48 @@
+[
+ [
+ 162276881999406,
+ 14
+ ],
+ {
+ "key": "admin|P",
+ "value": "$2a$10$7EqJtq98hPqEX7fNZaFWoO1mVO/4MLpGzsqojz6E9Gef6iXDjXdDa",
+ "generation": 1,
+ "timestamp": 0
+ },
+ {
+ "key": "admin|R|user-admin",
+ "value": "",
+ "generation": 1,
+ "timestamp": 0
+ },
+ {
+ "key": "superuser|P",
+ "value": "$2a$10$7EqJtq98hPqEX7fNZaFWoOZX0o4mZCBUwvzt/iecIcG4JaDOC41zK",
+ "generation": 3,
+ "timestamp": 458774922440
+ },
+ {
+ "key": "superuser|R|read-write-udf",
+ "value": "",
+ "generation": 3,
+ "timestamp": 458774922441
+ },
+ {
+ "key": "superuser|R|sys-admin",
+ "value": "",
+ "generation": 3,
+ "timestamp": 458774922442
+ },
+ {
+ "key": "superuser|R|user-admin",
+ "value": "",
+ "generation": 3,
+ "timestamp": 458774922442
+ },
+ {
+ "key": "superuser|R|data-admin",
+ "value": null,
+ "generation": 2,
+ "timestamp": 458774718056
+ }
+]
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 00000000..9cfaf6c7
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,58 @@
+name: Run tests
+
+# Trigger test workflow whenever:
+# 1. A pull request is updated (e.g. with new commits)
+# 2. Commits are pushed directly to the stage or master branch
+on:
+ push:
+ branches: ["stage*", "master*"]
+ pull_request:
+ branches: ["stage*", "master*"]
+ types: [
+ # Default triggers
+ opened,
+ synchronize,
+ reopened,
+ # Additional triggers
+ labeled,
+ unlabeled
+ ]
+ workflow_dispatch:
+ inputs:
+ test-server-rc:
+ type: boolean
+ default: false
+ required: true
+
+jobs:
+
+ test-ee:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+
+ - uses: ./.github/actions/run-ee-server
+ with:
+ use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }}
+ docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }}
+ docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }}
+
+ - name: Setup .NET
+ uses: actions/setup-dotnet@v4
+ with:
+ dotnet-version: 6.0.x
+
+ - name: Restore dependencies
+ run: dotnet restore /p:EnableWindowsTargeting=true
+
+ - name: Build
+ run: dotnet build --configuration Release --no-restore /p:EnableWindowsTargeting=true
+
+ - name: Run tests
+ run: dotnet test --configuration Release --no-build --verbosity normal
+
+ - name: Show logs if failed
+ if: ${{ failure() }}
+ run: |
+ docker container logs aerospike
+ cat ./configs/aerospike.conf
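+
+# To trigger this workflow manually, one option is the GitHub CLI (a hedged sketch):
+#   gh workflow run tests.yml --field test-server-rc=true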
diff --git a/.github/workflows/wait-for-as-server-to-start.bash b/.github/workflows/wait-for-as-server-to-start.bash
new file mode 100644
index 00000000..c43e17da
--- /dev/null
+++ b/.github/workflows/wait-for-as-server-to-start.bash
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+set -x
+# Makes sure that if the "docker exec" command fails, it is not ignored
+set -o pipefail
+
+container_name=$1
+is_security_enabled=$2
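+# Note: the composite action also passes a third argument
+# (is-strong-consistency-enabled), which this script currently ignores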
+
+if [[ $is_security_enabled == true ]]; then
+ # We need to pass credentials to asinfo if the server requires them
+ # TODO: pass credentials via command-line flags, since it's unclear how to use --instance with a global astools.conf
+ user_credentials="--user=admin --password=admin"
+fi
+
+while true; do
+ # An unset variable expands to an empty string
+ # The intermediate tee step prints the docker exec command's output in case it fails
+ # Sometimes, errors only appear in stdout and not stderr, e.g. if asinfo fails because credentials are missing
+ # (This is a bug in asinfo, since all error messages should go to stderr)
+ # Piping straight into grep would hide the first command's stdout,
+ # and grep has no way to print all of its input lines
+ # (ack does, but it doesn't come installed by default)
+ # shellcheck disable=SC2086 # The flags in user_credentials should stay separate words, not one string
+ echo "Checking if we can reach the server via the service port..."
+ if docker exec "$container_name" asinfo $user_credentials -v status | tee >(cat) | grep -qE "^ok"; then
+ # Server is ready when asinfo returns ok
+ echo "Can reach server now."
+ # docker container inspect "$container_name"
+ break
+ fi
+
+ echo "Server didn't return ok via the service port. Polling again..."
+done
+
+# Although the server may be reachable via the service port, the cluster may not be fully initialized yet.
+# If we try to connect too soon (e.g. right after "status" returns ok), the client may throw error code -1
+while true; do
+ echo "Waiting for server to stabilize (i.e return a cluster key)..."
+ # We assume that when an ERROR is returned, the cluster is not stable yet (i.e not fully initialized)
+ if docker exec "$container_name" asinfo $user_credentials -v cluster-stable 2>&1 | (! grep -qE "^ERROR"); then
+ echo "Server is in a stable state."
+ break
+ fi
+
+ echo "Server did not return a cluster key. Polling again..."
+done
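+
+# Example invocation (mirroring how the composite action calls this script:
+# container name, is-security-enabled, is-strong-consistency-enabled):
+#   timeout 30 bash ./.github/workflows/wait-for-as-server-to-start.bash aerospike false true || true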
diff --git a/AerospikeClient/Admin/Role.cs b/AerospikeClient/Admin/Role.cs
index feaa3130..2b8af1aa 100644
--- a/AerospikeClient/Admin/Role.cs
+++ b/AerospikeClient/Admin/Role.cs
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2022 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements.
@@ -49,22 +49,22 @@ public sealed class Role
public const string SIndexAdmin = "sindex-admin";
/// <summary>
- /// Allow read transactions.
+ /// Allow read commands.
/// </summary>
public const string Read = "read";
/// <summary>
- /// Allow read and write transactions.
+ /// Allow read and write commands.
/// </summary>
public const string ReadWrite = "read-write";
/// <summary>
- /// Allow read and write transactions within user defined functions.
+ /// Allow read and write commands within user defined functions.
/// </summary>
public const string ReadWriteUdf = "read-write-udf";
/// <summary>
- /// Allow write transactions.
+ /// Allow write commands.
/// </summary>
public const string Write = "write";
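+
+ // Usage sketch (hedged; assumes the client's CreateUser admin API):
+ //   client.CreateUser(null, "alice", "password1", new List<string> { Role.ReadWrite });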
diff --git a/AerospikeClient/Async/AsyncBatch.cs b/AerospikeClient/Async/AsyncBatch.cs
index 8f3a7b75..4d6056e3 100644
--- a/AerospikeClient/Async/AsyncBatch.cs
+++ b/AerospikeClient/Async/AsyncBatch.cs
@@ -1,1910 +1,2178 @@
-/*
- * Copyright 2012-2024 Aerospike, Inc.
- *
- * Portions may be licensed to Aerospike, Inc. under one or more contributor
- * license agreements.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-namespace Aerospike.Client
-{
- //-------------------------------------------------------
- // ReadList
- //-------------------------------------------------------
-
- public sealed class AsyncBatchReadListExecutor : AsyncBatchExecutor
- {
- private readonly BatchListListener listener;
- private readonly List<BatchRead> records;
-
- public AsyncBatchReadListExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- BatchListListener listener,
- List<BatchRead> records
- ) : base(cluster, true)
- {
- this.listener = listener;
- this.records = records;
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, this);
- AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new AsyncBatchReadListCommand(this, cluster, batchNode, policy, records);
- }
- // Dispatch commands to nodes.
- Execute(commands);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess(records);
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(ae);
- }
- }
-
- sealed class AsyncBatchReadListCommand : AsyncBatchCommand
- {
- private readonly List<BatchRead> records;
-
- public AsyncBatchReadListCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- List<BatchRead> records
- ) : base(parent, cluster, batch, batchPolicy, true)
- {
- this.records = records;
- }
-
- public AsyncBatchReadListCommand(AsyncBatchReadListCommand other) : base(other)
- {
- this.records = other.records;
- }
-
- protected internal override void WriteBuffer()
- {
- if (batch.node.HasBatchAny)
- {
- SetBatchOperate(batchPolicy, records, batch);
- }
- else
- {
- SetBatchRead(batchPolicy, records, batch);
- }
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRead record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- }
- else
- {
- record.SetError(resultCode, false);
- }
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchReadListCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchReadListCommand(parent, cluster, batchNode, batchPolicy, records);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent);
- }
- }
-
- //-------------------------------------------------------
- // ReadSequence
- //-------------------------------------------------------
-
- public sealed class AsyncBatchReadSequenceExecutor : AsyncBatchExecutor
- {
- private readonly BatchSequenceListener listener;
-
- public AsyncBatchReadSequenceExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- BatchSequenceListener listener,
- List<BatchRead> records
- ) : base(cluster, true)
- {
- this.listener = listener;
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, this);
- AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new AsyncBatchReadSequenceCommand(this, cluster, batchNode, policy, listener, records);
- }
- // Dispatch commands to nodes.
- Execute(commands);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess();
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(ae);
- }
- }
-
- sealed class AsyncBatchReadSequenceCommand : AsyncBatchCommand
- {
- private readonly BatchSequenceListener listener;
- private readonly List<BatchRead> records;
-
- public AsyncBatchReadSequenceCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- BatchSequenceListener listener,
- List<BatchRead> records
- ) : base(parent, cluster, batch, batchPolicy, true)
- {
- this.listener = listener;
- this.records = records;
- }
-
- public AsyncBatchReadSequenceCommand(AsyncBatchReadSequenceCommand other) : base(other)
- {
- this.listener = other.listener;
- this.records = other.records;
- }
-
- protected internal override void WriteBuffer()
- {
- if (batch.node.HasBatchAny)
- {
- SetBatchOperate(batchPolicy, records, batch);
- }
- else
- {
- SetBatchRead(batchPolicy, records, batch);
- }
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRead record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- }
- else
- {
- record.SetError(resultCode, false);
- }
- listener.OnRecord(record);
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchReadSequenceCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchReadSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent);
- }
- }
-
- //-------------------------------------------------------
- // GetArray
- //-------------------------------------------------------
-
- public sealed class AsyncBatchGetArrayExecutor : AsyncBatchExecutor
- {
- private readonly Key[] keys;
- private readonly Record[] records;
- private readonly RecordArrayListener listener;
-
- public AsyncBatchGetArrayExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- RecordArrayListener listener,
- Key[] keys,
- string[] binNames,
- Operation[] ops,
- int readAttr,
- bool isOperation
- ) : base(cluster, false)
- {
- this.keys = keys;
- this.records = new Record[keys.Length];
- this.listener = listener;
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this);
- AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new AsyncBatchGetArrayCommand(this, cluster, batchNode, policy, keys, binNames, ops, records, readAttr, isOperation);
- }
- // Dispatch commands to nodes.
- Execute(commands);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess(keys, records);
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(new AerospikeException.BatchRecords(records, ae));
- }
- }
-
- sealed class AsyncBatchGetArrayCommand : AsyncBatchCommand
- {
- private readonly Key[] keys;
- private readonly string[] binNames;
- private readonly Operation[] ops;
- private readonly Record[] records;
- private readonly int readAttr;
-
- public AsyncBatchGetArrayCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- string[] binNames,
- Operation[] ops,
- Record[] records,
- int readAttr,
- bool isOperation
- ) : base(parent, cluster, batch, batchPolicy, isOperation)
- {
- this.keys = keys;
- this.binNames = binNames;
- this.ops = ops;
- this.records = records;
- this.readAttr = readAttr;
- }
-
- public AsyncBatchGetArrayCommand(AsyncBatchGetArrayCommand other) : base(other)
- {
- this.keys = other.keys;
- this.binNames = other.binNames;
- this.ops = other.ops;
- this.records = other.records;
- this.readAttr = other.readAttr;
- }
-
- protected internal override void WriteBuffer()
- {
- if (batch.node.HasBatchAny)
- {
- BatchAttr attr = new BatchAttr(batchPolicy, readAttr, ops);
- SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr);
- }
- else
- {
- SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr);
- }
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- if (resultCode == 0)
- {
- records[batchIndex] = ParseRecord();
- }
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchGetArrayCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchGetArrayCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
- }
- }
-
- //-------------------------------------------------------
- // GetSequence
- //-------------------------------------------------------
-
- public sealed class AsyncBatchGetSequenceExecutor : AsyncBatchExecutor
- {
- private readonly RecordSequenceListener listener;
-
- public AsyncBatchGetSequenceExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- RecordSequenceListener listener,
- Key[] keys,
- string[] binNames,
- Operation[] ops,
- int readAttr,
- bool isOperation
- ) : base(cluster, false)
- {
- this.listener = listener;
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this);
- AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new AsyncBatchGetSequenceCommand(this, cluster, batchNode, policy, keys, binNames, ops, listener, readAttr, isOperation);
- }
- // Dispatch commands to nodes.
- Execute(commands);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess();
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(ae);
- }
- }
-
- sealed class AsyncBatchGetSequenceCommand : AsyncBatchCommand
- {
- private readonly Key[] keys;
- private readonly string[] binNames;
- private readonly Operation[] ops;
- private readonly RecordSequenceListener listener;
- private readonly int readAttr;
-
- public AsyncBatchGetSequenceCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- string[] binNames,
- Operation[] ops,
- RecordSequenceListener listener,
- int readAttr,
- bool isOperation
- ) : base(parent, cluster, batch, batchPolicy, isOperation)
- {
- this.keys = keys;
- this.binNames = binNames;
- this.ops = ops;
- this.listener = listener;
- this.readAttr = readAttr;
- }
-
- public AsyncBatchGetSequenceCommand(AsyncBatchGetSequenceCommand other) : base(other)
- {
- this.keys = other.keys;
- this.binNames = other.binNames;
- this.ops = other.ops;
- this.listener = other.listener;
- this.readAttr = other.readAttr;
- }
-
- protected internal override void WriteBuffer()
- {
- if (batch.node.HasBatchAny)
- {
- BatchAttr attr = new BatchAttr(batchPolicy, readAttr, ops);
- SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr);
- }
- else
- {
- SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr);
- }
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- Key keyOrig = keys[batchIndex];
-
- if (resultCode == 0)
- {
- Record record = ParseRecord();
- listener.OnRecord(keyOrig, record);
- }
- else
- {
- listener.OnRecord(keyOrig, null);
- }
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchGetSequenceCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchGetSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, listener, readAttr, isOperation);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
- }
- }
-
- //-------------------------------------------------------
- // ExistsArray
- //-------------------------------------------------------
-
- public sealed class AsyncBatchExistsArrayExecutor : AsyncBatchExecutor
- {
- private readonly Key[] keys;
- private readonly bool[] existsArray;
- private readonly ExistsArrayListener listener;
-
- public AsyncBatchExistsArrayExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- Key[] keys,
- ExistsArrayListener listener
- ) : base(cluster, false)
- {
- this.keys = keys;
- this.existsArray = new bool[keys.Length];
- this.listener = listener;
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this);
- AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new AsyncBatchExistsArrayCommand(this, cluster, batchNode, policy, keys, existsArray);
- }
- // Dispatch commands to nodes.
- Execute(commands);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess(keys, existsArray);
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(new AerospikeException.BatchExists(existsArray, ae));
- }
- }
-
- sealed class AsyncBatchExistsArrayCommand : AsyncBatchCommand
- {
- private readonly Key[] keys;
- private readonly bool[] existsArray;
-
- public AsyncBatchExistsArrayCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- bool[] existsArray
- ) : base(parent, cluster, batch, batchPolicy, false)
- {
- this.keys = keys;
- this.existsArray = existsArray;
- }
-
- public AsyncBatchExistsArrayCommand(AsyncBatchExistsArrayCommand other) : base(other)
- {
- this.keys = other.keys;
- this.existsArray = other.existsArray;
- }
-
- protected internal override void WriteBuffer()
- {
- if (batch.node.HasBatchAny)
- {
- BatchAttr attr = new BatchAttr(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA);
- SetBatchOperate(batchPolicy, keys, batch, null, null, attr);
- }
- else
- {
- SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA);
- }
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- if (opCount > 0)
- {
- throw new AerospikeException.Parse("Received bins that were not requested!");
- }
-
- existsArray[batchIndex] = resultCode == 0;
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchExistsArrayCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchExistsArrayCommand(parent, cluster, batchNode, batchPolicy, keys, existsArray);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
- }
- }
-
- //-------------------------------------------------------
- // ExistsSequence
- //-------------------------------------------------------
-
- public sealed class AsyncBatchExistsSequenceExecutor : AsyncBatchExecutor
- {
- private readonly ExistsSequenceListener listener;
-
- public AsyncBatchExistsSequenceExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- Key[] keys,
- ExistsSequenceListener listener
- ) : base(cluster, false)
- {
- this.listener = listener;
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this);
- AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNode, policy, keys, listener);
- }
- // Dispatch commands to nodes.
- Execute(commands);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess();
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(ae);
- }
-
- }
-
- sealed class AsyncBatchExistsSequenceCommand : AsyncBatchCommand
- {
- private readonly Key[] keys;
- private readonly ExistsSequenceListener listener;
-
- public AsyncBatchExistsSequenceCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- ExistsSequenceListener listener
- ) : base(parent, cluster, batch, batchPolicy, false)
- {
- this.keys = keys;
- this.listener = listener;
- }
-
- public AsyncBatchExistsSequenceCommand(AsyncBatchExistsSequenceCommand other) : base(other)
- {
- this.keys = other.keys;
- this.listener = other.listener;
- }
-
- protected internal override void WriteBuffer()
- {
- if (batch.node.HasBatchAny)
- {
- BatchAttr attr = new BatchAttr(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA);
- SetBatchOperate(batchPolicy, keys, batch, null, null, attr);
- }
- else
- {
- SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA);
- }
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- if (opCount > 0)
- {
- throw new AerospikeException.Parse("Received bins that were not requested!");
- }
-
- Key keyOrig = keys[batchIndex];
- listener.OnExists(keyOrig, resultCode == 0);
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchExistsSequenceCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchExistsSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, listener);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
- }
- }
-
- //-------------------------------------------------------
- // OperateList
- //-------------------------------------------------------
-
- public sealed class AsyncBatchOperateListExecutor : AsyncBatchExecutor
- {
- internal readonly BatchOperateListListener listener;
- internal readonly List<BatchRecord> records;
-
- public AsyncBatchOperateListExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- BatchOperateListListener listener,
- List<BatchRecord> records
- ) : base(cluster, true)
- {
- this.listener = listener;
- this.records = records;
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, this);
- AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- tasks[count++] = new AsyncBatchOperateListCommand(this, cluster, batchNode, policy, records);
- }
- // Dispatch commands to nodes.
- Execute(tasks);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess(records, GetStatus());
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(ae);
- }
- }
-
- sealed class AsyncBatchOperateListCommand : AsyncBatchCommand
- {
- internal readonly List<BatchRecord> records;
-
- public AsyncBatchOperateListCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- List<BatchRecord> records
- ) : base(parent, cluster, batch, batchPolicy, true)
- {
- this.records = records;
- }
-
- public AsyncBatchOperateListCommand(AsyncBatchOperateListCommand other) : base(other)
- {
- this.records = other.records;
- }
-
- protected internal override bool IsWrite()
- {
- // This method is only called to set inDoubt on node level errors.
- // SetError() will filter out reads when setting record level inDoubt.
- return true;
- }
-
- protected internal override void WriteBuffer()
- {
- SetBatchOperate(batchPolicy, records, batch);
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRecord record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- return;
- }
-
- if (resultCode == ResultCode.UDF_BAD_RESPONSE)
- {
- Record r = ParseRecord();
- string m = r.GetString("FAILURE");
-
- if (m != null)
- {
- // Need to store record because failure bin contains an error message.
- record.record = r;
- record.resultCode = resultCode;
- record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter);
- parent.SetRowError();
- return;
- }
- }
-
- record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter));
- parent.SetRowError();
- }
-
- internal override void SetInDoubt(bool inDoubt)
- {
- if (!inDoubt)
- {
- return;
- }
-
- foreach (int index in batch.offsets)
- {
- BatchRecord record = records[index];
-
- if (record.resultCode == ResultCode.NO_RESPONSE)
- {
- record.inDoubt = record.hasWrite;
- }
- }
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchOperateListCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchOperateListCommand(parent, cluster, batchNode, batchPolicy, records);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent);
- }
- }
-
- //-------------------------------------------------------
- // OperateSequence
- //-------------------------------------------------------
-
- public sealed class AsyncBatchOperateSequenceExecutor : AsyncBatchExecutor
- {
- internal readonly BatchRecordSequenceListener listener;
-
- public AsyncBatchOperateSequenceExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- BatchRecordSequenceListener listener,
- List<BatchRecord> records
- ) : base(cluster, true)
- {
- this.listener = listener;
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, this);
- AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- tasks[count++] = new AsyncBatchOperateSequenceCommand(this, cluster, batchNode, policy, listener, records);
- }
- // Dispatch commands to nodes.
- Execute(tasks);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess();
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(ae);
- }
- }
-
- sealed class AsyncBatchOperateSequenceCommand : AsyncBatchCommand
- {
- internal readonly BatchRecordSequenceListener listener;
- internal readonly List<BatchRecord> records;
-
- public AsyncBatchOperateSequenceCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- BatchRecordSequenceListener listener,
- List<BatchRecord> records
- ) : base(parent, cluster, batch, batchPolicy, true)
- {
- this.listener = listener;
- this.records = records;
- }
-
- public AsyncBatchOperateSequenceCommand(AsyncBatchOperateSequenceCommand other) : base(other)
- {
- this.listener = other.listener;
- this.records = other.records;
- }
-
- protected internal override bool IsWrite()
- {
- // This method is only called to set inDoubt on node level errors.
- // SetError() will filter out reads when setting record level inDoubt.
- return true;
- }
-
- protected internal override void WriteBuffer()
- {
- SetBatchOperate(batchPolicy, records, batch);
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRecord record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- }
- else if (resultCode == ResultCode.UDF_BAD_RESPONSE)
- {
- Record r = ParseRecord();
- string m = r.GetString("FAILURE");
-
- if (m != null)
- {
- // Need to store record because failure bin contains an error message.
- record.record = r;
- record.resultCode = resultCode;
- record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter);
- }
- else
- {
- record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter));
- }
- }
- else
- {
- record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter));
- }
- AsyncBatch.OnRecord(cluster, listener, record, batchIndex);
- }
-
- internal override void SetInDoubt(bool inDoubt)
- {
- if (!inDoubt)
- {
- return;
- }
-
- foreach (int index in batch.offsets)
- {
- BatchRecord record = records[index];
-
- if (record.resultCode == ResultCode.NO_RESPONSE)
- {
- // Set inDoubt, but do not call OnRecord() because user already has access to full
- // BatchRecord list and can examine each record for inDoubt when the exception occurs.
- record.inDoubt = record.hasWrite;
- }
- }
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchOperateSequenceCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchOperateSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent);
- }
- }
-
- //-------------------------------------------------------
- // OperateRecordArray
- //-------------------------------------------------------
-
- public sealed class AsyncBatchOperateRecordArrayExecutor : AsyncBatchExecutor
- {
- internal readonly BatchRecordArrayListener listener;
- internal readonly BatchRecord[] records;
-
- public AsyncBatchOperateRecordArrayExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- BatchRecordArrayListener listener,
- Key[] keys,
- Operation[] ops,
- BatchAttr attr
- ) : base(cluster, true)
- {
- this.listener = listener;
- this.records = new BatchRecord[keys.Length];
-
- for (int i = 0; i < keys.Length; i++)
- {
- this.records[i] = new BatchRecord(keys[i], attr.hasWrite);
- }
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, attr.hasWrite, this);
- AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- tasks[count++] = new AsyncBatchOperateRecordArrayCommand(this, cluster, batchNode, policy, keys, ops, records, attr);
- }
- // Dispatch commands to nodes.
- Execute(tasks);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess(records, GetStatus());
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(records, ae);
- }
- }
-
- sealed class AsyncBatchOperateRecordArrayCommand : AsyncBatchCommand
- {
- internal readonly Key[] keys;
- internal readonly Operation[] ops;
- internal readonly BatchRecord[] records;
- internal readonly BatchAttr attr;
-
- public AsyncBatchOperateRecordArrayCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- Operation[] ops,
- BatchRecord[] records,
- BatchAttr attr
- ) : base(parent, cluster, batch, batchPolicy, ops != null)
- {
- this.keys = keys;
- this.ops = ops;
- this.records = records;
- this.attr = attr;
- }
-
- public AsyncBatchOperateRecordArrayCommand(AsyncBatchOperateRecordArrayCommand other) : base(other)
- {
- this.keys = other.keys;
- this.ops = other.ops;
- this.records = other.records;
- this.attr = other.attr;
- }
-
- protected internal override bool IsWrite()
- {
- return attr.hasWrite;
- }
-
- protected internal override void WriteBuffer()
- {
- SetBatchOperate(batchPolicy, keys, batch, null, ops, attr);
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRecord record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- }
- else
- {
- record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
- parent.SetRowError();
- }
- }
-
- internal override void SetInDoubt(bool inDoubt)
- {
- if (!inDoubt || !attr.hasWrite)
- {
- return;
- }
-
- foreach (int index in batch.offsets)
- {
- BatchRecord record = records[index];
-
- if (record.resultCode == ResultCode.NO_RESPONSE)
- {
- record.inDoubt = inDoubt;
- }
- }
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchOperateRecordArrayCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchOperateRecordArrayCommand(parent, cluster, batchNode, batchPolicy, keys, ops, records, attr);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent);
- }
- }
-
- //-------------------------------------------------------
- // OperateRecordSequence
- //-------------------------------------------------------
-
- public sealed class AsyncBatchOperateRecordSequenceExecutor : AsyncBatchExecutor
- {
- internal readonly BatchRecordSequenceListener listener;
- private readonly bool[] sent;
-
- public AsyncBatchOperateRecordSequenceExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- BatchRecordSequenceListener listener,
- Key[] keys,
- Operation[] ops,
- BatchAttr attr
- ) : base(cluster, true)
- {
- this.listener = listener;
- this.sent = new bool[keys.Length];
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this);
- AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- tasks[count++] = new AsyncBatchOperateRecordSequenceCommand(this, cluster, batchNode, policy, keys, ops, sent, listener, attr);
- }
- // Dispatch commands to nodes.
- Execute(tasks);
- }
-
- public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite)
- {
- BatchRecord record = new BatchRecord(key, null, ae.Result, inDoubt, hasWrite);
- sent[index] = true;
- AsyncBatch.OnRecord(cluster, listener, record, index);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess();
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(ae);
- }
- }
-
- sealed class AsyncBatchOperateRecordSequenceCommand : AsyncBatchCommand
- {
- internal readonly Key[] keys;
- internal readonly Operation[] ops;
- internal readonly bool[] sent;
- internal readonly BatchRecordSequenceListener listener;
- internal readonly BatchAttr attr;
-
- public AsyncBatchOperateRecordSequenceCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- Operation[] ops,
- bool[] sent,
- BatchRecordSequenceListener listener,
- BatchAttr attr
- ) : base(parent, cluster, batch, batchPolicy, ops != null)
- {
- this.keys = keys;
- this.ops = ops;
- this.sent = sent;
- this.listener = listener;
- this.attr = attr;
- }
-
- public AsyncBatchOperateRecordSequenceCommand(AsyncBatchOperateRecordSequenceCommand other) : base(other)
- {
- this.keys = other.keys;
- this.ops = other.ops;
- this.sent = other.sent;
- this.listener = other.listener;
- this.attr = other.attr;
- }
-
- protected internal override bool IsWrite()
- {
- return attr.hasWrite;
- }
-
- protected internal override void WriteBuffer()
- {
- SetBatchOperate(batchPolicy, keys, batch, null, ops, attr);
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- Key keyOrig = keys[batchIndex];
- BatchRecord record;
-
- if (resultCode == 0)
- {
- record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite);
- }
- else
- {
- record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
- }
- sent[batchIndex] = true;
- AsyncBatch.OnRecord(cluster, listener, record, batchIndex);
- }
-
- internal override void SetInDoubt(bool inDoubt)
- {
- // Set inDoubt for all unsent records, so the listener receives a full set of records.
- foreach (int index in batch.offsets)
- {
- if (!sent[index])
- {
- Key key = keys[index];
- BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite);
- sent[index] = true;
- AsyncBatch.OnRecord(cluster, listener, record, index);
- }
- }
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchOperateRecordSequenceCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchOperateRecordSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, ops, sent, listener, attr);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent);
- }
- }
-
- //-------------------------------------------------------
- // UDFArray
- //-------------------------------------------------------
-
- public sealed class AsyncBatchUDFArrayExecutor : AsyncBatchExecutor
- {
- internal readonly BatchRecordArrayListener listener;
- internal readonly BatchRecord[] recordArray;
-
- public AsyncBatchUDFArrayExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- BatchRecordArrayListener listener,
- Key[] keys,
- string packageName,
- string functionName,
- byte[] argBytes,
- BatchAttr attr
- ) : base(cluster, true)
- {
- this.listener = listener;
- this.recordArray = new BatchRecord[keys.Length];
-
- for (int i = 0; i < keys.Length; i++)
- {
- this.recordArray[i] = new BatchRecord(keys[i], attr.hasWrite);
- }
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, recordArray, attr.hasWrite, this);
- AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- tasks[count++] = new AsyncBatchUDFArrayCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, recordArray, attr);
- }
- // Dispatch commands to nodes.
- Execute(tasks);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess(recordArray, GetStatus());
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(recordArray, ae);
- }
- }
-
- public sealed class AsyncBatchUDFArrayCommand : AsyncBatchCommand
- {
- internal readonly Key[] keys;
- internal readonly string packageName;
- internal readonly string functionName;
- internal readonly byte[] argBytes;
- internal readonly BatchRecord[] records;
- internal readonly BatchAttr attr;
-
- public AsyncBatchUDFArrayCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- string packageName,
- string functionName,
- byte[] argBytes,
- BatchRecord[] records,
- BatchAttr attr
- ) : base(parent, cluster, batch, batchPolicy, false)
- {
- this.keys = keys;
- this.packageName = packageName;
- this.functionName = functionName;
- this.argBytes = argBytes;
- this.records = records;
- this.attr = attr;
- }
-
- public AsyncBatchUDFArrayCommand(AsyncBatchUDFArrayCommand other) : base(other)
- {
- this.keys = other.keys;
- this.packageName = other.packageName;
- this.functionName = other.functionName;
- this.argBytes = other.argBytes;
- this.records = other.records;
- this.attr = other.attr;
- }
-
- protected internal override bool IsWrite()
- {
- return attr.hasWrite;
- }
-
- protected internal override void WriteBuffer()
- {
- SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr);
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRecord record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- return;
- }
-
- if (resultCode == ResultCode.UDF_BAD_RESPONSE)
- {
- Record r = ParseRecord();
- string m = r.GetString("FAILURE");
-
- if (m != null)
- {
- // Need to store record because failure bin contains an error message.
- record.record = r;
- record.resultCode = resultCode;
- record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter);
- parent.SetRowError();
- return;
- }
- }
-
- record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
- parent.SetRowError();
- }
-
- internal override void SetInDoubt(bool inDoubt)
- {
- if (!inDoubt || !attr.hasWrite)
- {
- return;
- }
-
- foreach (int index in batch.offsets)
- {
- BatchRecord record = records[index];
-
- if (record.resultCode == ResultCode.NO_RESPONSE)
- {
- record.inDoubt = inDoubt;
- }
- }
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchUDFArrayCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchUDFArrayCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent);
- }
- }
-
- //-------------------------------------------------------
- // UDFSequence
- //-------------------------------------------------------
-
- public sealed class AsyncBatchUDFSequenceExecutor : AsyncBatchExecutor
- {
- internal readonly BatchRecordSequenceListener listener;
- private readonly bool[] sent;
-
- public AsyncBatchUDFSequenceExecutor
- (
- AsyncCluster cluster,
- BatchPolicy policy,
- BatchRecordSequenceListener listener,
- Key[] keys,
- string packageName,
- string functionName,
- byte[] argBytes,
- BatchAttr attr
- ) : base(cluster, true)
- {
- this.listener = listener;
- this.sent = new bool[keys.Length];
-
- // Create commands.
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this);
- AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- tasks[count++] = new AsyncBatchUDFSequenceCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, sent, listener, attr);
- }
- // Dispatch commands to nodes.
- Execute(tasks);
- }
-
- public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite)
- {
- BatchRecord record = new BatchRecord(key, null, ae.Result, inDoubt, hasWrite);
- sent[index] = true;
- AsyncBatch.OnRecord(cluster, listener, record, index);
- }
-
- protected internal override void OnSuccess()
- {
- listener.OnSuccess();
- }
-
- protected internal override void OnFailure(AerospikeException ae)
- {
- listener.OnFailure(ae);
- }
- }
-
- sealed class AsyncBatchUDFSequenceCommand : AsyncBatchCommand
- {
- internal readonly Key[] keys;
- internal readonly string packageName;
- internal readonly string functionName;
- internal readonly byte[] argBytes;
- internal readonly bool[] sent;
- internal readonly BatchRecordSequenceListener listener;
- internal readonly BatchAttr attr;
-
- public AsyncBatchUDFSequenceCommand
- (
- AsyncBatchExecutor parent,
- AsyncCluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- string packageName,
- string functionName,
- byte[] argBytes,
- bool[] sent,
- BatchRecordSequenceListener listener,
- BatchAttr attr
- ) : base(parent, cluster, batch, batchPolicy, false)
- {
- this.keys = keys;
- this.packageName = packageName;
- this.functionName = functionName;
- this.argBytes = argBytes;
- this.sent = sent;
- this.listener = listener;
- this.attr = attr;
- }
-
- public AsyncBatchUDFSequenceCommand(AsyncBatchUDFSequenceCommand other) : base(other)
- {
- this.keys = other.keys;
- this.packageName = other.packageName;
- this.functionName = other.functionName;
- this.argBytes = other.argBytes;
- this.sent = other.sent;
- this.listener = other.listener;
- this.attr = other.attr;
- }
-
- protected internal override bool IsWrite()
- {
- return attr.hasWrite;
- }
-
- protected internal override void WriteBuffer()
- {
- SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr);
- }
-
- protected internal override void ParseRow()
- {
- SkipKey(fieldCount);
-
- Key keyOrig = keys[batchIndex];
- BatchRecord record;
-
- if (resultCode == 0)
- {
- record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite);
- }
- else if (resultCode == ResultCode.UDF_BAD_RESPONSE)
- {
- Record r = ParseRecord();
- string m = r.GetString("FAILURE");
-
- if (m != null)
- {
- // Need to store record because failure bin contains an error message.
- record = new BatchRecord(keyOrig, r, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
- }
- else
- {
- record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
- }
- }
- else
- {
- record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
- }
- sent[batchIndex] = true;
- AsyncBatch.OnRecord(cluster, listener, record, batchIndex);
- }
-
- internal override void SetInDoubt(bool inDoubt)
- {
- // Set inDoubt for all unsent records, so the listener receives a full set of records.
- foreach (int index in batch.offsets)
- {
- if (!sent[index])
- {
- Key key = keys[index];
- BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite);
- sent[index] = true;
- AsyncBatch.OnRecord(cluster, listener, record, index);
- }
- }
- }
-
- protected internal override AsyncCommand CloneCommand()
- {
- return new AsyncBatchUDFSequenceCommand(this);
- }
-
- internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
- {
- return new AsyncBatchUDFSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, sent, listener, attr);
- }
-
- internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent);
- }
- }
-
- //-------------------------------------------------------
- // Batch Base Executor
- //-------------------------------------------------------
-
- public abstract class AsyncBatchExecutor : IBatchStatus
- {
- private AerospikeException exception;
- private int max;
- private int count;
- private readonly bool hasResultCode;
- private bool error;
-
- public AsyncBatchExecutor(AsyncCluster cluster, bool hasResultCode)
- {
- this.hasResultCode = hasResultCode;
- cluster.AddTran();
- }
-
- public void Execute(AsyncBatchCommand[] commands)
- {
- max = commands.Length;
-
- foreach (AsyncBatchCommand command in commands)
- {
- command.Execute();
- }
- }
-
- public void Retry(AsyncMultiCommand[] commands)
- {
- lock (this)
- {
- // Adjust max for new commands minus failed command.
- max += commands.Length - 1;
- }
-
- foreach (AsyncBatchCommand command in commands)
- {
- command.ExecuteBatchRetry();
- }
- }
-
- public void ChildSuccess(AsyncNode node)
- {
- bool complete;
-
- lock (this)
- {
- complete = ++count == max;
- }
-
- if (complete)
- {
- Finish();
- }
- }
-
- public void ChildFailure(AerospikeException ae)
- {
- bool complete;
-
- lock (this)
- {
- if (exception == null)
- {
- exception = ae;
- }
- complete = ++count == max;
- }
-
- if (complete)
- {
- Finish();
- }
- }
-
- private void Finish()
- {
- if (exception == null)
- {
- OnSuccess();
- }
- else
- {
- OnFailure(exception);
- }
- }
-
- public virtual void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite)
- {
- // Only used in executors with sequence listeners.
- // These executors will override this method.
- }
-
- public void BatchKeyError(AerospikeException ae)
- {
- error = true;
-
- if (!hasResultCode)
- {
- // Legacy batch read commands that do not store a key specific resultCode.
- // Store exception which will be passed to the listener on batch completion.
- if (exception == null)
- {
- exception = ae;
- }
- }
- }
-
- public void SetRowError()
- {
- // Indicate that a key specific error occurred.
- error = true;
- }
-
- public bool GetStatus()
- {
- return !error;
- }
-
- protected internal abstract void OnSuccess();
- protected internal abstract void OnFailure(AerospikeException ae);
- }
-
- //-------------------------------------------------------
- // Batch Base Command
- //-------------------------------------------------------
-
- public abstract class AsyncBatchCommand : AsyncMultiCommand
- {
- internal readonly AsyncBatchExecutor parent;
- internal readonly BatchNode batch;
- internal readonly BatchPolicy batchPolicy;
- internal uint sequenceAP;
- internal uint sequenceSC;
-
- public AsyncBatchCommand(AsyncBatchExecutor parent, AsyncCluster cluster, BatchNode batch, BatchPolicy batchPolicy, bool isOperation)
- : base(cluster, batchPolicy, (AsyncNode)batch.node, isOperation)
- {
- this.parent = parent;
- this.batch = batch;
- this.batchPolicy = batchPolicy;
- }
-
- public AsyncBatchCommand(AsyncBatchCommand other) : base(other)
- {
- this.parent = other.parent;
- this.batch = other.batch;
- this.batchPolicy = other.batchPolicy;
- this.sequenceAP = other.sequenceAP;
- this.sequenceSC = other.sequenceSC;
- }
-
- protected override Latency.LatencyType GetLatencyType()
- {
- return Latency.LatencyType.BATCH;
- }
-
- protected internal override bool PrepareRetry(bool timeout)
- {
- if (!(policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK))
- {
- // Perform regular retry to same node.
- return true;
- }
-
- sequenceAP++;
-
- if (! timeout || policy.readModeSC != ReadModeSC.LINEARIZE) {
- sequenceSC++;
- }
- return false;
- }
-
- protected internal override bool RetryBatch()
- {
- List<BatchNode> batchNodes = null;
-
- try
- {
- // Retry requires keys for this node to be split among other nodes.
- // This can cause an exponential number of commands.
- batchNodes = GenerateBatchNodes();
-
- if (batchNodes.Count == 1 && batchNodes[0].node == batch.node)
- {
- // Batch node is the same. Go through normal retry.
- // Normal retries reuse eventArgs, so PutBackArgsOnError()
- // should not be called here.
- return false;
- }
-
- cluster.AddRetries(batchNodes.Count);
- }
- catch (Exception)
- {
- // Close original command.
- base.ReleaseBuffer();
- throw;
- }
-
- // Close original command.
- base.ReleaseBuffer();
-
- // Execute new commands.
- AsyncBatchCommand[] cmds = new AsyncBatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- AsyncBatchCommand cmd = CreateCommand(batchNode);
- cmd.sequenceAP = sequenceAP;
- cmd.sequenceSC = sequenceSC;
- cmd.SetBatchRetry(this);
- cmds[count++] = cmd;
- }
-
- // Retry new commands.
- parent.Retry(cmds);
-
- // Return true so original batch command is stopped.
- return true;
- }
-
- protected internal override void OnSuccess()
- {
- parent.ChildSuccess(node);
- }
-
- protected internal override void OnFailure(AerospikeException e)
- {
- SetInDoubt(e.InDoubt);
- parent.ChildFailure(e);
- }
-
- internal virtual void SetInDoubt(bool inDoubt)
- {
- // Do nothing by default. Batch writes will override this method.
- }
-
- internal abstract AsyncBatchCommand CreateCommand(BatchNode batchNode);
- internal abstract List<BatchNode> GenerateBatchNodes();
- }
-
- internal class AsyncBatch
- {
- internal static void OnRecord(Cluster cluster, BatchRecordSequenceListener listener, BatchRecord record, int index)
- {
- try
- {
- listener.OnRecord(record, index);
- }
- catch (Exception e)
- {
- Log.Error(cluster.context, "Unexpected exception from OnRecord(): " + Util.GetErrorMessage(e));
- }
- }
- }
-}
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ //-------------------------------------------------------
+ // ReadList
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchReadListExecutor : AsyncBatchExecutor
+ {
+ private readonly BatchListListener listener;
+ private readonly List<BatchRead> records;
+
+ public AsyncBatchReadListExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchListListener listener,
+ List<BatchRead> records
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+ this.records = records;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, this);
+ AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ commands[count++] = new AsyncBatchReadListCommand(this, cluster, batchNode, policy, records);
+ }
+ this.commands = commands;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess(records);
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(ae);
+ }
+ }
+
+ sealed class AsyncBatchReadListCommand : AsyncBatchCommand
+ {
+ private readonly List<BatchRead> records;
+
+ public AsyncBatchReadListCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ List<BatchRead> records
+ ) : base(parent, cluster, batch, batchPolicy, true)
+ {
+ this.records = records;
+ }
+
+ public AsyncBatchReadListCommand(AsyncBatchReadListCommand other) : base(other)
+ {
+ this.records = other.records;
+ }
+
+ protected internal override void WriteBuffer()
+ {
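+ // Use the newer batch-any wire format when the target node supports it;
+ // otherwise fall back to the legacy batch read protocol.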
+ if (batch.node.HasBatchAny)
+ {
+ SetBatchOperate(batchPolicy, records, batch);
+ }
+ else
+ {
+ SetBatchRead(batchPolicy, records, batch);
+ }
+ }
+
+ protected internal override void ParseRow()
+ {
+ BatchRead record = records[batchIndex];
+
+ ParseFieldsRead(record.key);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ }
+ else
+ {
+ record.SetError(resultCode, false);
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchReadListCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchReadListCommand(parent, cluster, batchNode, batchPolicy, records);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // ReadSequence
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchReadSequenceExecutor : AsyncBatchExecutor
+ {
+ private readonly BatchSequenceListener listener;
+
+ public AsyncBatchReadSequenceExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchSequenceListener listener,
+ List<BatchRead> records
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, this);
+ AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ commands[count++] = new AsyncBatchReadSequenceCommand(this, cluster, batchNode, policy, listener, records);
+ }
+ this.commands = commands;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess();
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(ae);
+ }
+ }
+
+ sealed class AsyncBatchReadSequenceCommand : AsyncBatchCommand
+ {
+ private readonly BatchSequenceListener listener;
+ private readonly List<BatchRead> records;
+
+ public AsyncBatchReadSequenceCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ BatchSequenceListener listener,
+ List<BatchRead> records
+ ) : base(parent, cluster, batch, batchPolicy, true)
+ {
+ this.listener = listener;
+ this.records = records;
+ }
+
+ public AsyncBatchReadSequenceCommand(AsyncBatchReadSequenceCommand other) : base(other)
+ {
+ this.listener = other.listener;
+ this.records = other.records;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ if (batch.node.HasBatchAny)
+ {
+ SetBatchOperate(batchPolicy, records, batch);
+ }
+ else
+ {
+ SetBatchRead(batchPolicy, records, batch);
+ }
+ }
+
+ protected internal override void ParseRow()
+ {
+ BatchRead record = records[batchIndex];
+
+ ParseFieldsRead(record.key);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ }
+ else
+ {
+ record.SetError(resultCode, false);
+ }
+ listener.OnRecord(record);
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchReadSequenceCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchReadSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // GetArray
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchGetArrayExecutor : AsyncBatchExecutor
+ {
+ private readonly Key[] keys;
+ private readonly Record[] records;
+ private readonly RecordArrayListener listener;
+
+ public AsyncBatchGetArrayExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ RecordArrayListener listener,
+ Key[] keys,
+ string[] binNames,
+ Operation[] ops,
+ int readAttr,
+ bool isOperation
+ ) : base(cluster, false)
+ {
+ this.keys = keys;
+ this.records = new Record[keys.Length];
+ this.listener = listener;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this);
+ AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ commands[count++] = new AsyncBatchGetArrayCommand(this, cluster, batchNode, policy, keys, binNames, ops, records, readAttr, isOperation);
+ }
+ this.commands = commands;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess(keys, records);
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(new AerospikeException.BatchRecords(records, ae));
+ }
+ }
+
+ sealed class AsyncBatchGetArrayCommand : AsyncBatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly string[] binNames;
+ private readonly Operation[] ops;
+ private readonly Record[] records;
+ private readonly int readAttr;
+
+ public AsyncBatchGetArrayCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ string[] binNames,
+ Operation[] ops,
+ Record[] records,
+ int readAttr,
+ bool isOperation
+ ) : base(parent, cluster, batch, batchPolicy, isOperation)
+ {
+ this.keys = keys;
+ this.binNames = binNames;
+ this.ops = ops;
+ this.records = records;
+ this.readAttr = readAttr;
+ }
+
+ public AsyncBatchGetArrayCommand(AsyncBatchGetArrayCommand other) : base(other)
+ {
+ this.keys = other.keys;
+ this.binNames = other.binNames;
+ this.ops = other.ops;
+ this.records = other.records;
+ this.readAttr = other.readAttr;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ if (batch.node.HasBatchAny)
+ {
+ BatchAttr attr = new(batchPolicy, readAttr, ops);
+ SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr);
+ }
+ else
+ {
+ SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr);
+ }
+ }
+
+ protected internal override void ParseRow()
+ {
+ ParseFieldsRead(keys[batchIndex]);
+
+ if (resultCode == 0)
+ {
+ records[batchIndex] = ParseRecord();
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchGetArrayCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchGetArrayCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // GetSequence
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchGetSequenceExecutor : AsyncBatchExecutor
+ {
+ private readonly RecordSequenceListener listener;
+
+ public AsyncBatchGetSequenceExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ RecordSequenceListener listener,
+ Key[] keys,
+ string[] binNames,
+ Operation[] ops,
+ int readAttr,
+ bool isOperation
+ ) : base(cluster, false)
+ {
+ this.listener = listener;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this);
+ AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ commands[count++] = new AsyncBatchGetSequenceCommand(this, cluster, batchNode, policy, keys, binNames, ops, listener, readAttr, isOperation);
+ }
+ this.commands = commands;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess();
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(ae);
+ }
+ }
+
+ sealed class AsyncBatchGetSequenceCommand : AsyncBatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly string[] binNames;
+ private readonly Operation[] ops;
+ private readonly RecordSequenceListener listener;
+ private readonly int readAttr;
+
+ public AsyncBatchGetSequenceCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ string[] binNames,
+ Operation[] ops,
+ RecordSequenceListener listener,
+ int readAttr,
+ bool isOperation
+ ) : base(parent, cluster, batch, batchPolicy, isOperation)
+ {
+ this.keys = keys;
+ this.binNames = binNames;
+ this.ops = ops;
+ this.listener = listener;
+ this.readAttr = readAttr;
+ }
+
+ public AsyncBatchGetSequenceCommand(AsyncBatchGetSequenceCommand other) : base(other)
+ {
+ this.keys = other.keys;
+ this.binNames = other.binNames;
+ this.ops = other.ops;
+ this.listener = other.listener;
+ this.readAttr = other.readAttr;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ if (batch.node.HasBatchAny)
+ {
+ BatchAttr attr = new(batchPolicy, readAttr, ops);
+ SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr);
+ }
+ else
+ {
+ SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr);
+ }
+ }
+
+ protected internal override void ParseRow()
+ {
+ Key keyOrig = keys[batchIndex];
+
+ ParseFieldsRead(keyOrig);
+
+ if (resultCode == 0)
+ {
+ Record record = ParseRecord();
+ listener.OnRecord(keyOrig, record);
+ }
+ else
+ {
+ listener.OnRecord(keyOrig, null);
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchGetSequenceCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchGetSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, listener, readAttr, isOperation);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // ExistsArray
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchExistsArrayExecutor : AsyncBatchExecutor
+ {
+ private readonly Key[] keys;
+ private readonly bool[] existsArray;
+ private readonly ExistsArrayListener listener;
+
+ public AsyncBatchExistsArrayExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ Key[] keys,
+ ExistsArrayListener listener
+ ) : base(cluster, false)
+ {
+ this.keys = keys;
+ this.existsArray = new bool[keys.Length];
+ this.listener = listener;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this);
+ AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ commands[count++] = new AsyncBatchExistsArrayCommand(this, cluster, batchNode, policy, keys, existsArray);
+ }
+ this.commands = commands;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess(keys, existsArray);
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(new AerospikeException.BatchExists(existsArray, ae));
+ }
+ }
+
+ sealed class AsyncBatchExistsArrayCommand : AsyncBatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly bool[] existsArray;
+
+ public AsyncBatchExistsArrayCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ bool[] existsArray
+ ) : base(parent, cluster, batch, batchPolicy, false)
+ {
+ this.keys = keys;
+ this.existsArray = existsArray;
+ }
+
+ public AsyncBatchExistsArrayCommand(AsyncBatchExistsArrayCommand other) : base(other)
+ {
+ this.keys = other.keys;
+ this.existsArray = other.existsArray;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ if (batch.node.HasBatchAny)
+ {
+ BatchAttr attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ SetBatchOperate(batchPolicy, keys, batch, null, null, attr);
+ }
+ else
+ {
+ SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ }
+ }
+
+ protected internal override void ParseRow()
+ {
+ ParseFieldsRead(keys[batchIndex]);
+ existsArray[batchIndex] = resultCode == 0;
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchExistsArrayCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchExistsArrayCommand(parent, cluster, batchNode, batchPolicy, keys, existsArray);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // ExistsSequence
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchExistsSequenceExecutor : AsyncBatchExecutor
+ {
+ private readonly ExistsSequenceListener listener;
+
+ public AsyncBatchExistsSequenceExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ Key[] keys,
+ ExistsSequenceListener listener
+ ) : base(cluster, false)
+ {
+ this.listener = listener;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this);
+ AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ commands[count++] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNode, policy, keys, listener);
+ }
+ this.commands = commands;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess();
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(ae);
+ }
+ }
+
+ sealed class AsyncBatchExistsSequenceCommand : AsyncBatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly ExistsSequenceListener listener;
+
+ public AsyncBatchExistsSequenceCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ ExistsSequenceListener listener
+ ) : base(parent, cluster, batch, batchPolicy, false)
+ {
+ this.keys = keys;
+ this.listener = listener;
+ }
+
+ public AsyncBatchExistsSequenceCommand(AsyncBatchExistsSequenceCommand other) : base(other)
+ {
+ this.keys = other.keys;
+ this.listener = other.listener;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ if (batch.node.HasBatchAny)
+ {
+ BatchAttr attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ SetBatchOperate(batchPolicy, keys, batch, null, null, attr);
+ }
+ else
+ {
+ SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ }
+ }
+
+ protected internal override void ParseRow()
+ {
+ Key keyOrig = keys[batchIndex];
+ ParseFieldsRead(keyOrig);
+ listener.OnExists(keyOrig, resultCode == 0);
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchExistsSequenceCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchExistsSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, listener);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // OperateList
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchOperateListExecutor : AsyncBatchExecutor
+ {
+ internal readonly BatchOperateListListener listener;
+ internal readonly List<BatchRecord> records;
+
+ public AsyncBatchOperateListExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchOperateListListener listener,
+ List<BatchRecord> records
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+ this.records = records;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, this);
+ AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ tasks[count++] = new AsyncBatchOperateListCommand(this, cluster, batchNode, policy, records);
+ }
+ this.commands = tasks;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess(records, GetStatus());
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(ae);
+ }
+ }
+
+ sealed class AsyncBatchOperateListCommand : AsyncBatchCommand
+ {
+ internal readonly List<BatchRecord> records;
+
+ public AsyncBatchOperateListCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ List<BatchRecord> records
+ ) : base(parent, cluster, batch, batchPolicy, true)
+ {
+ this.records = records;
+ }
+
+ public AsyncBatchOperateListCommand(AsyncBatchOperateListCommand other) : base(other)
+ {
+ this.records = other.records;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ // This method is only called to set inDoubt on node level errors.
+ // SetError() will filter out reads when setting record level inDoubt.
+ return true;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchOperate(batchPolicy, records, batch);
+ }
+
+ protected internal override void ParseRow()
+ {
+ BatchRecord record = records[batchIndex];
+
+ ParseFields(record.key, record.hasWrite);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ return;
+ }
+
+ if (resultCode == ResultCode.UDF_BAD_RESPONSE)
+ {
+ Record r = ParseRecord();
+ string m = r.GetString("FAILURE");
+
+ if (m != null)
+ {
+ // Need to store record because failure bin contains an error message.
+ record.record = r;
+ record.resultCode = resultCode;
+ record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter);
+ parent.SetRowError();
+ return;
+ }
+ }
+
+ record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter));
+ parent.SetRowError();
+ }
+
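+ // Mark unresponded write records as in doubt. No per-record callback is made
+ // here because the caller already holds the full BatchRecord list.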
+ internal override void SetInDoubt(bool inDoubt)
+ {
+ if (!inDoubt)
+ {
+ return;
+ }
+
+ foreach (int index in batch.offsets)
+ {
+ BatchRecord record = records[index];
+
+ if (record.resultCode == ResultCode.NO_RESPONSE)
+ {
+ record.inDoubt = record.hasWrite;
+
+ if (record.inDoubt && policy.Txn != null)
+ {
+ policy.Txn.OnWriteInDoubt(record.key);
+ }
+ }
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchOperateListCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchOperateListCommand(parent, cluster, batchNode, batchPolicy, records);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // OperateSequence
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchOperateSequenceExecutor : AsyncBatchExecutor
+ {
+ internal readonly BatchRecordSequenceListener listener;
+
+ public AsyncBatchOperateSequenceExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchRecordSequenceListener listener,
+ List<BatchRecord> records
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, this);
+ AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ tasks[count++] = new AsyncBatchOperateSequenceCommand(this, cluster, batchNode, policy, listener, records);
+ }
+ this.commands = tasks;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess();
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(ae);
+ }
+ }
+
+ sealed class AsyncBatchOperateSequenceCommand : AsyncBatchCommand
+ {
+ internal readonly BatchRecordSequenceListener listener;
+ internal readonly List<BatchRecord> records;
+
+ public AsyncBatchOperateSequenceCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ BatchRecordSequenceListener listener,
+ List<BatchRecord> records
+ ) : base(parent, cluster, batch, batchPolicy, true)
+ {
+ this.listener = listener;
+ this.records = records;
+ }
+
+ public AsyncBatchOperateSequenceCommand(AsyncBatchOperateSequenceCommand other) : base(other)
+ {
+ this.listener = other.listener;
+ this.records = other.records;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ // This method is only called to set inDoubt on node level errors.
+ // SetError() will filter out reads when setting record level inDoubt.
+ return true;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchOperate(batchPolicy, records, batch);
+ }
+
+ protected internal override void ParseRow()
+ {
+ BatchRecord record = records[batchIndex];
+
+ ParseFields(record.key, record.hasWrite);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ }
+ else if (resultCode == ResultCode.UDF_BAD_RESPONSE)
+ {
+ Record r = ParseRecord();
+ string m = r.GetString("FAILURE");
+
+ if (m != null)
+ {
+ // Need to store record because failure bin contains an error message.
+ record.record = r;
+ record.resultCode = resultCode;
+ record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter);
+ }
+ else
+ {
+ record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter));
+ }
+ }
+ else
+ {
+ record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter));
+ }
+ AsyncBatch.OnRecord(cluster, listener, record, batchIndex);
+ }
+
+ internal override void SetInDoubt(bool inDoubt)
+ {
+ if (!inDoubt)
+ {
+ return;
+ }
+
+ foreach (int index in batch.offsets)
+ {
+ BatchRecord record = records[index];
+
+ if (record.resultCode == ResultCode.NO_RESPONSE)
+ {
+ // Set inDoubt, but do not call OnRecord() because user already has access to full
+ // BatchRecord list and can examine each record for inDoubt when the exception occurs.
+ record.inDoubt = record.hasWrite;
+
+ if (record.inDoubt && policy.Txn != null)
+ {
+ policy.Txn.OnWriteInDoubt(record.key);
+ }
+ }
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchOperateSequenceCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchOperateSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // OperateRecordArray
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchOperateRecordArrayExecutor : AsyncBatchExecutor
+ {
+ internal readonly BatchRecordArrayListener listener;
+ internal readonly BatchRecord[] records;
+
+ public AsyncBatchOperateRecordArrayExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchRecordArrayListener listener,
+ Key[] keys,
+ Operation[] ops,
+ BatchAttr attr
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+ this.records = new BatchRecord[keys.Length];
+
+ for (int i = 0; i < keys.Length; i++)
+ {
+ this.records[i] = new BatchRecord(keys[i], attr.hasWrite);
+ }
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, attr.hasWrite, this);
+ AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ tasks[count++] = new AsyncBatchOperateRecordArrayCommand(this, cluster, batchNode, policy, keys, ops, records, attr);
+ }
+ this.commands = tasks;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess(records, GetStatus());
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(records, ae);
+ }
+ }
+
+ sealed class AsyncBatchOperateRecordArrayCommand : AsyncBatchCommand
+ {
+ internal readonly Key[] keys;
+ internal readonly Operation[] ops;
+ internal readonly BatchRecord[] records;
+ internal readonly BatchAttr attr;
+
+ public AsyncBatchOperateRecordArrayCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ Operation[] ops,
+ BatchRecord[] records,
+ BatchAttr attr
+ ) : base(parent, cluster, batch, batchPolicy, ops != null)
+ {
+ this.keys = keys;
+ this.ops = ops;
+ this.records = records;
+ this.attr = attr;
+ }
+
+ public AsyncBatchOperateRecordArrayCommand(AsyncBatchOperateRecordArrayCommand other) : base(other)
+ {
+ this.keys = other.keys;
+ this.ops = other.ops;
+ this.records = other.records;
+ this.attr = other.attr;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ return attr.hasWrite;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchOperate(batchPolicy, keys, batch, null, ops, attr);
+ }
+
+ protected internal override void ParseRow()
+ {
+ BatchRecord record = records[batchIndex];
+
+ ParseFields(record.key, record.hasWrite);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ }
+ else
+ {
+ record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
+ parent.SetRowError();
+ }
+ }
+
+ internal override void SetInDoubt(bool inDoubt)
+ {
+ if (!inDoubt || !attr.hasWrite)
+ {
+ return;
+ }
+
+ foreach (int index in batch.offsets)
+ {
+ BatchRecord record = records[index];
+
+ if (record.resultCode == ResultCode.NO_RESPONSE)
+ {
+ record.inDoubt = true;
+
+ policy.Txn?.OnWriteInDoubt(record.key);
+ }
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchOperateRecordArrayCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchOperateRecordArrayCommand(parent, cluster, batchNode, batchPolicy, keys, ops, records, attr);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // OperateRecordSequence
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchOperateRecordSequenceExecutor : AsyncBatchExecutor
+ {
+ internal readonly BatchRecordSequenceListener listener;
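+ // Tracks which keys have already been reported to the sequence listener,
+ // so SetInDoubt() can deliver the remaining records exactly once.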
+ private readonly bool[] sent;
+
+ public AsyncBatchOperateRecordSequenceExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchRecordSequenceListener listener,
+ Key[] keys,
+ Operation[] ops,
+ BatchAttr attr
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+ this.sent = new bool[keys.Length];
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this);
+ AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ tasks[count++] = new AsyncBatchOperateRecordSequenceCommand(this, cluster, batchNode, policy, keys, ops, sent, listener, attr);
+ }
+ this.commands = tasks;
+ }
+
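+ // Per-key error raised outside a batch command (e.g. while grouping keys by node):
+ // report it to the sequence listener immediately and mark the key as sent.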
+ public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite)
+ {
+ BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite);
+ sent[index] = true;
+ AsyncBatch.OnRecord(cluster, listener, record, index);
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess();
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(ae);
+ }
+ }
+
+ sealed class AsyncBatchOperateRecordSequenceCommand : AsyncBatchCommand
+ {
+ internal readonly Key[] keys;
+ internal readonly Operation[] ops;
+ internal readonly bool[] sent;
+ internal readonly BatchRecordSequenceListener listener;
+ internal readonly BatchAttr attr;
+
+ public AsyncBatchOperateRecordSequenceCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ Operation[] ops,
+ bool[] sent,
+ BatchRecordSequenceListener listener,
+ BatchAttr attr
+ ) : base(parent, cluster, batch, batchPolicy, ops != null)
+ {
+ this.keys = keys;
+ this.ops = ops;
+ this.sent = sent;
+ this.listener = listener;
+ this.attr = attr;
+ }
+
+ public AsyncBatchOperateRecordSequenceCommand(AsyncBatchOperateRecordSequenceCommand other) : base(other)
+ {
+ this.keys = other.keys;
+ this.ops = other.ops;
+ this.sent = other.sent;
+ this.listener = other.listener;
+ this.attr = other.attr;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ return attr.hasWrite;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchOperate(batchPolicy, keys, batch, null, ops, attr);
+ }
+
+ protected internal override void ParseRow()
+ {
+ Key keyOrig = keys[batchIndex];
+
+ ParseFields(keyOrig, attr.hasWrite);
+
+ BatchRecord record;
+
+ if (resultCode == 0)
+ {
+ record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite);
+ }
+ else
+ {
+ record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
+ }
+ sent[batchIndex] = true;
+ AsyncBatch.OnRecord(cluster, listener, record, batchIndex);
+ }
+
+ internal override void SetInDoubt(bool inDoubt)
+ {
+ // Set inDoubt for all unsent records, so the listener receives a full set of records.
+ foreach (int index in batch.offsets)
+ {
+ if (!sent[index])
+ {
+ Key key = keys[index];
+ BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite);
+ sent[index] = true;
+
+ if (record.inDoubt && policy.Txn != null)
+ {
+ policy.Txn.OnWriteInDoubt(key);
+ }
+
+ AsyncBatch.OnRecord(cluster, listener, record, index);
+ }
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchOperateRecordSequenceCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchOperateRecordSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, ops, sent, listener, attr);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // UDFArray
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchUDFArrayExecutor : AsyncBatchExecutor
+ {
+ internal readonly BatchRecordArrayListener listener;
+ internal readonly BatchRecord[] recordArray;
+
+ public AsyncBatchUDFArrayExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchRecordArrayListener listener,
+ Key[] keys,
+ string packageName,
+ string functionName,
+ byte[] argBytes,
+ BatchAttr attr
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+ this.recordArray = new BatchRecord[keys.Length];
+
+ for (int i = 0; i < keys.Length; i++)
+ {
+ this.recordArray[i] = new BatchRecord(keys[i], attr.hasWrite);
+ }
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, recordArray, attr.hasWrite, this);
+ AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ tasks[count++] = new AsyncBatchUDFArrayCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, recordArray, attr);
+ }
+ this.commands = tasks;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess(recordArray, GetStatus());
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(recordArray, ae);
+ }
+ }
+
+ public sealed class AsyncBatchUDFArrayCommand : AsyncBatchCommand
+ {
+ internal readonly Key[] keys;
+ internal readonly string packageName;
+ internal readonly string functionName;
+ internal readonly byte[] argBytes;
+ internal readonly BatchRecord[] records;
+ internal readonly BatchAttr attr;
+
+ public AsyncBatchUDFArrayCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ string packageName,
+ string functionName,
+ byte[] argBytes,
+ BatchRecord[] records,
+ BatchAttr attr
+ ) : base(parent, cluster, batch, batchPolicy, false)
+ {
+ this.keys = keys;
+ this.packageName = packageName;
+ this.functionName = functionName;
+ this.argBytes = argBytes;
+ this.records = records;
+ this.attr = attr;
+ }
+
+ public AsyncBatchUDFArrayCommand(AsyncBatchUDFArrayCommand other) : base(other)
+ {
+ this.keys = other.keys;
+ this.packageName = other.packageName;
+ this.functionName = other.functionName;
+ this.argBytes = other.argBytes;
+ this.records = other.records;
+ this.attr = other.attr;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ return attr.hasWrite;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr);
+ }
+
+ protected internal override void ParseRow()
+ {
+ BatchRecord record = records[batchIndex];
+
+ ParseFields(record.key, record.hasWrite);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ return;
+ }
+
+ if (resultCode == ResultCode.UDF_BAD_RESPONSE)
+ {
+ Record r = ParseRecord();
+ string m = r.GetString("FAILURE");
+
+ if (m != null)
+ {
+ // Need to store record because failure bin contains an error message.
+ record.record = r;
+ record.resultCode = resultCode;
+ record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter);
+ parent.SetRowError();
+ return;
+ }
+ }
+
+ record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
+ parent.SetRowError();
+ }
+
+ internal override void SetInDoubt(bool inDoubt)
+ {
+ if (!inDoubt || !attr.hasWrite)
+ {
+ return;
+ }
+
+ foreach (int index in batch.offsets)
+ {
+ BatchRecord record = records[index];
+
+ if (record.resultCode == ResultCode.NO_RESPONSE)
+ {
+ record.inDoubt = true;
+
+ policy.Txn?.OnWriteInDoubt(record.key);
+ }
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchUDFArrayCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchUDFArrayCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // UDFSequence
+ //-------------------------------------------------------
+
+ public sealed class AsyncBatchUDFSequenceExecutor : AsyncBatchExecutor
+ {
+ internal readonly BatchRecordSequenceListener listener;
+ private readonly bool[] sent;
+
+ public AsyncBatchUDFSequenceExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchRecordSequenceListener listener,
+ Key[] keys,
+ string packageName,
+ string functionName,
+ byte[] argBytes,
+ BatchAttr attr
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+ this.sent = new bool[keys.Length];
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this);
+ AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ tasks[count++] = new AsyncBatchUDFSequenceCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, sent, listener, attr);
+ }
+ this.commands = tasks;
+ }
+
+ public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite)
+ {
+ BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite);
+ sent[index] = true;
+ AsyncBatch.OnRecord(cluster, listener, record, index);
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess();
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(ae);
+ }
+ }
+
+ sealed class AsyncBatchUDFSequenceCommand : AsyncBatchCommand
+ {
+ internal readonly Key[] keys;
+ internal readonly string packageName;
+ internal readonly string functionName;
+ internal readonly byte[] argBytes;
+ internal readonly bool[] sent;
+ internal readonly BatchRecordSequenceListener listener;
+ internal readonly BatchAttr attr;
+
+ public AsyncBatchUDFSequenceCommand
+ (
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ string packageName,
+ string functionName,
+ byte[] argBytes,
+ bool[] sent,
+ BatchRecordSequenceListener listener,
+ BatchAttr attr
+ ) : base(parent, cluster, batch, batchPolicy, false)
+ {
+ this.keys = keys;
+ this.packageName = packageName;
+ this.functionName = functionName;
+ this.argBytes = argBytes;
+ this.sent = sent;
+ this.listener = listener;
+ this.attr = attr;
+ }
+
+ public AsyncBatchUDFSequenceCommand(AsyncBatchUDFSequenceCommand other) : base(other)
+ {
+ this.keys = other.keys;
+ this.packageName = other.packageName;
+ this.functionName = other.functionName;
+ this.argBytes = other.argBytes;
+ this.sent = other.sent;
+ this.listener = other.listener;
+ this.attr = other.attr;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ return attr.hasWrite;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr);
+ }
+
+ protected internal override void ParseRow()
+ {
+ Key keyOrig = keys[batchIndex];
+
+ ParseFields(keyOrig, attr.hasWrite);
+
+ BatchRecord record;
+
+ if (resultCode == 0)
+ {
+ record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite);
+ }
+ else if (resultCode == ResultCode.UDF_BAD_RESPONSE)
+ {
+ Record r = ParseRecord();
+ string m = r.GetString("FAILURE");
+
+ if (m != null)
+ {
+ // Need to store record because failure bin contains an error message.
+ record = new BatchRecord(keyOrig, r, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
+ }
+ else
+ {
+ record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
+ }
+ }
+ else
+ {
+ record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
+ }
+ sent[batchIndex] = true;
+ AsyncBatch.OnRecord(cluster, listener, record, batchIndex);
+ }
+
+ internal override void SetInDoubt(bool inDoubt)
+ {
+ // Set inDoubt for all unsent records, so the listener receives a full set of records.
+ foreach (int index in batch.offsets)
+ {
+ if (!sent[index])
+ {
+ Key key = keys[index];
+ BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite);
+ sent[index] = true;
+
+ if (record.inDoubt && policy.Txn != null)
+ {
+ policy.Txn.OnWriteInDoubt(record.key);
+ }
+
+ AsyncBatch.OnRecord(cluster, listener, record, index);
+ }
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchUDFSequenceCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchUDFSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, sent, listener, attr);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // MRT
+ //-------------------------------------------------------
+
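+ // Verifies the versions of records read within a multi-record transaction (MRT)
+ // before the transaction commits.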
+ public sealed class AsyncBatchTxnVerifyExecutor : AsyncBatchExecutor
+ {
+ internal readonly BatchRecordArrayListener listener;
+ private readonly BatchRecord[] records;
+
+ public AsyncBatchTxnVerifyExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchRecordArrayListener listener,
+ Key[] keys,
+ long?[] versions,
+ BatchRecord[] records
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+ this.records = records;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, false, this);
+ AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ tasks[count++] = new AsyncBatchTxnVerifyCommand(this, cluster, batchNode, policy, keys, versions, records);
+ }
+ this.commands = tasks;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess(records, GetStatus());
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(records, ae);
+ }
+ }
+
+ sealed class AsyncBatchTxnVerifyCommand : AsyncBatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly long?[] versions;
+ private readonly BatchRecord[] records;
+
+ public AsyncBatchTxnVerifyCommand(
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ long?[] versions,
+ BatchRecord[] records
+ ) : base(parent, cluster, batch, batchPolicy, false)
+ {
+ this.keys = keys;
+ this.versions = versions;
+ this.records = records;
+ }
+
+ public AsyncBatchTxnVerifyCommand(AsyncBatchTxnVerifyCommand other) : base(other)
+ {
+ this.keys = other.keys;
+ this.versions = other.versions;
+ this.records = other.records;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchTxnVerify(batchPolicy, keys, versions, batch);
+ }
+
+ protected internal override void ParseRow()
+ {
+ SkipKey(fieldCount);
+
+ BatchRecord record = records[batchIndex];
+
+ if (resultCode == ResultCode.OK)
+ {
+ record.resultCode = resultCode;
+ }
+ else
+ {
+ record.SetError(resultCode, false);
+ parent.SetRowError();
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchTxnVerifyCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchTxnVerifyCommand(parent, cluster, batchNode, batchPolicy, keys, versions, records);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
+ }
+ }
+
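+ // Rolls multi-record transaction participants forward or back; the direction
+ // is carried by the batch attributes passed to each roll command.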
+ public sealed class AsyncBatchTxnRollExecutor : AsyncBatchExecutor
+ {
+ internal readonly BatchRecordArrayListener listener;
+ private readonly BatchRecord[] records;
+
+ public AsyncBatchTxnRollExecutor
+ (
+ AsyncCluster cluster,
+ BatchPolicy policy,
+ BatchRecordArrayListener listener,
+ Txn txn,
+ Key[] keys,
+ BatchRecord[] records,
+ BatchAttr attr
+ ) : base(cluster, true)
+ {
+ this.listener = listener;
+ this.records = records;
+
+ // Create commands.
+ List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, true, this);
+ AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ tasks[count++] = new AsyncBatchTxnRollCommand(this, cluster, batchNode, policy, txn, keys, records, attr);
+ }
+ this.commands = tasks;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ listener.OnSuccess(records, GetStatus());
+ }
+
+ protected internal override void OnFailure(AerospikeException ae)
+ {
+ listener.OnFailure(records, ae);
+ }
+ }
+
+ sealed class AsyncBatchTxnRollCommand : AsyncBatchCommand
+ {
+ private readonly Txn txn;
+ private readonly Key[] keys;
+ private readonly BatchRecord[] records;
+ private readonly BatchAttr attr;
+
+ public AsyncBatchTxnRollCommand(
+ AsyncBatchExecutor parent,
+ AsyncCluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Txn txn,
+ Key[] keys,
+ BatchRecord[] records,
+ BatchAttr attr
+ ) : base(parent, cluster, batch, batchPolicy, false)
+ {
+ this.txn = txn;
+ this.keys = keys;
+ this.records = records;
+ this.attr = attr;
+ }
+
+ public AsyncBatchTxnRollCommand(AsyncBatchTxnRollCommand other) : base(other)
+ {
+ this.txn = other.txn;
+ this.keys = other.keys;
+ this.attr = other.attr;
+ this.records = other.records;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchTxnRoll(batchPolicy, txn, keys, batch, attr);
+ }
+
+ protected internal override void ParseRow()
+ {
+ SkipKey(fieldCount);
+
+ BatchRecord record = records[batchIndex];
+
+ if (resultCode == ResultCode.OK)
+ {
+ record.resultCode = resultCode;
+ }
+ else
+ {
+ record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
+ parent.SetRowError();
+ }
+ }
+
+ protected internal override AsyncCommand CloneCommand()
+ {
+ return new AsyncBatchTxnRollCommand(this);
+ }
+
+ internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new AsyncBatchTxnRollCommand(parent, cluster, batchNode, batchPolicy, txn, keys, records, attr);
+ }
+
+ internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, true, parent);
+ }
+ }
+
+ //-------------------------------------------------------
+ // Batch Base Executor
+ //-------------------------------------------------------
+
+ public abstract class AsyncBatchExecutor : IBatchStatus
+ {
+ private AerospikeException exception;
+ private int max;
+ private int count;
+ private readonly bool hasResultCode;
+ private bool error;
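+ // Per-node sub-commands created by the concrete executor's constructor.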
+ public AsyncBatchCommand[] commands;
+ public AsyncCluster cluster;
+
+ public AsyncBatchExecutor(AsyncCluster cluster, bool hasResultCode)
+ {
+ this.hasResultCode = hasResultCode;
+ this.cluster = cluster;
+ cluster.AddCommandCount();
+ }
+
+ public void Execute()
+ {
+ Execute(commands);
+ }
+
+ public void Execute(AsyncBatchCommand[] commands)
+ {
+ max = commands.Length;
+
+ foreach (AsyncBatchCommand command in commands)
+ {
+ command.Execute();
+ }
+ }
+
+ public void Retry(AsyncMultiCommand[] commands)
+ {
+ lock (this)
+ {
+ // Adjust max for new commands minus failed command.
+ max += commands.Length - 1;
+ }
+
+ foreach (AsyncBatchCommand command in commands.Cast<AsyncBatchCommand>())
+ {
+ command.ExecuteBatchRetry();
+ }
+ }
+
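+ // A child command finished; the executor completes once all children,
+ // including any retries added above, have reported back.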
+ public void ChildSuccess(AsyncNode node)
+ {
+ bool complete;
+
+ lock (this)
+ {
+ complete = ++count == max;
+ }
+
+ if (complete)
+ {
+ Finish();
+ }
+ }
+
+ public void ChildFailure(AerospikeException ae)
+ {
+ bool complete;
+
+ lock (this)
+ {
+ if (exception == null)
+ {
+ exception = ae;
+ }
+ complete = ++count == max;
+ }
+
+ if (complete)
+ {
+ Finish();
+ }
+ }
+
+ private void Finish()
+ {
+ if (exception == null)
+ {
+ OnSuccess();
+ }
+ else
+ {
+ OnFailure(exception);
+ }
+ }
+
+ public virtual void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite)
+ {
+ // Only used in executors with sequence listeners.
+ // These executors will override this method.
+ }
+
+ public void BatchKeyError(AerospikeException ae)
+ {
+ error = true;
+
+ if (!hasResultCode)
+ {
+ // Legacy batch read commands that do not store a key specific resultCode.
+ // Store exception which will be passed to the listener on batch completion.
+ if (exception == null)
+ {
+ exception = ae;
+ }
+ }
+ }
+
+ public void SetRowError()
+ {
+ // Indicate that a key specific error occurred.
+ error = true;
+ }
+
+ public bool GetStatus()
+ {
+ return !error;
+ }
+
+ protected internal abstract void OnSuccess();
+ protected internal abstract void OnFailure(AerospikeException ae);
+ }
+
+ //-------------------------------------------------------
+ // Batch Base Command
+ //-------------------------------------------------------
+
+ public abstract class AsyncBatchCommand : AsyncMultiCommand
+ {
+ internal readonly AsyncBatchExecutor parent;
+ internal readonly BatchNode batch;
+ internal readonly BatchPolicy batchPolicy;
+ internal uint sequenceAP;
+ internal uint sequenceSC;
+
+ public AsyncBatchCommand(AsyncBatchExecutor parent, AsyncCluster cluster, BatchNode batch, BatchPolicy batchPolicy, bool isOperation)
+ : base(cluster, batchPolicy, (AsyncNode)batch.node, isOperation)
+ {
+ this.parent = parent;
+ this.batch = batch;
+ this.batchPolicy = batchPolicy;
+ }
+
+ public AsyncBatchCommand(AsyncBatchCommand other) : base(other)
+ {
+ this.parent = other.parent;
+ this.batch = other.batch;
+ this.batchPolicy = other.batchPolicy;
+ this.sequenceAP = other.sequenceAP;
+ this.sequenceSC = other.sequenceSC;
+ }
+
+ protected override Latency.LatencyType GetLatencyType()
+ {
+ return Latency.LatencyType.BATCH;
+ }
+
+ protected void ParseFieldsRead(Key key)
+ {
+ if (policy.Txn != null)
+ {
+ long? version = ParseVersion(fieldCount);
+ policy.Txn.OnRead(key, version);
+ }
+ else
+ {
+ SkipKey(fieldCount);
+ }
+ }
+
+ protected void ParseFields(Key key, bool hasWrite)
+ {
+ if (policy.Txn != null)
+ {
+ long? version = ParseVersion(fieldCount);
+
+ if (hasWrite)
+ {
+ policy.Txn.OnWrite(key, version, resultCode);
+ }
+ else
+ {
+ policy.Txn.OnRead(key, version);
+ }
+ }
+ else
+ {
+ SkipKey(fieldCount);
+ }
+ }
+
+ protected internal override bool PrepareRetry(bool timeout)
+ {
+ if (!(policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK))
+ {
+ // Perform regular retry to same node.
+ return true;
+ }
+
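+ // Advance the replica sequence so the retry targets the next replica.
+ // sequenceSC is not advanced for a timeout in LINEARIZE mode, because a
+ // linearized strong-consistency read must retry on the same SC replica.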
+ sequenceAP++;
+
+ if (!timeout || policy.readModeSC != ReadModeSC.LINEARIZE)
+ {
+ sequenceSC++;
+ }
+ return false;
+ }
+
+ protected internal override bool RetryBatch()
+ {
+ List<BatchNode> batchNodes = null;
+
+ try
+ {
+ // Retry requires keys for this node to be split among other nodes.
+ // This can cause an exponential number of commands.
+ batchNodes = GenerateBatchNodes();
+
+ if (batchNodes.Count == 1 && batchNodes[0].node == batch.node)
+ {
+ // Batch node is the same. Go through normal retry.
+ // Normal retries reuse eventArgs, so PutBackArgsOnError()
+ // should not be called here.
+ return false;
+ }
+
+ cluster.AddRetries(batchNodes.Count);
+ }
+ catch (Exception)
+ {
+ // Close original command.
+ base.ReleaseBuffer();
+ throw;
+ }
+
+ // Close original command.
+ base.ReleaseBuffer();
+
+ // Execute new commands.
+ AsyncBatchCommand[] cmds = new AsyncBatchCommand[batchNodes.Count];
+ int count = 0;
+
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ AsyncBatchCommand cmd = CreateCommand(batchNode);
+ cmd.sequenceAP = sequenceAP;
+ cmd.sequenceSC = sequenceSC;
+ cmd.SetBatchRetry(this);
+ cmds[count++] = cmd;
+ }
+
+ // Retry new commands.
+ parent.Retry(cmds);
+
+ // Return true so original batch command is stopped.
+ return true;
+ }
+
+ protected internal override void OnSuccess()
+ {
+ parent.ChildSuccess(node);
+ }
+
+ protected internal override void OnFailure(AerospikeException e)
+ {
+ SetInDoubt(e.InDoubt);
+ parent.ChildFailure(e);
+ }
+
+ internal virtual void SetInDoubt(bool inDoubt)
+ {
+ // Do nothing by default. Batch writes will override this method.
+ }
+
+ internal abstract AsyncBatchCommand CreateCommand(BatchNode batchNode);
+ internal abstract List<BatchNode> GenerateBatchNodes();
+ }
+
+ internal class AsyncBatch
+ {
+ internal static void OnRecord(Cluster cluster, BatchRecordSequenceListener listener, BatchRecord record, int index)
+ {
+ try
+ {
+ listener.OnRecord(record, index);
+ }
+ catch (Exception e)
+ {
+ Log.Error(cluster.context, "Unexpected exception from OnRecord(): " + Util.GetErrorMessage(e));
+ }
+ }
+ }
+}
diff --git a/AerospikeClient/Async/AsyncClient.cs b/AerospikeClient/Async/AsyncClient.cs
index a60e2b70..e1647a5b 100644
--- a/AerospikeClient/Async/AsyncClient.cs
+++ b/AerospikeClient/Async/AsyncClient.cs
@@ -15,6 +15,8 @@
* the License.
*/
+using static Aerospike.Client.AsyncQueryValidate;
+
namespace Aerospike.Client
{
///
@@ -57,7 +59,7 @@ public class AsyncClient : AerospikeClient, IAsyncClient
///
///
/// If the connection succeeds, the client is ready to process database requests.
- /// If the connection fails, the cluster will remain in a disconnected state
+ /// If the connection fails, the cluster will remain in a disconnected state
/// until the server is activated.
///
///
@@ -81,7 +83,7 @@ public AsyncClient(string hostname, int port)
///
/// If the connection succeeds, the client is ready to process database requests.
/// If the connection fails and the policy's failOnInvalidHosts is true, a connection
- /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state
+ /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state
/// until the server is activated.
///
///
@@ -110,7 +112,7 @@ public AsyncClient(AsyncClientPolicy policy, string hostname, int port)
///
/// If one connection succeeds, the client is ready to process database requests.
/// If all connections fail and the policy's failIfNotConnected is true, a connection
- /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state
+ /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state
/// until the server is activated.
///
///
@@ -128,6 +130,112 @@ public AsyncClient(AsyncClientPolicy policy, params Host[] hosts)
base.cluster = this.cluster;
}
+ //-------------------------------------------------------
+ // Multi-Record Transactions
+ //-------------------------------------------------------
+
+ /// <summary>
+ /// Asynchronously attempt to commit the given multi-record transaction.
+ /// Create listener, call asynchronous commit and return task monitor.
+ /// </summary>
+ /// <param name="txn">multi-record transaction</param>
+ /// <param name="token">cancellation token</param>
+ public Task<CommitStatus.CommitStatusType> Commit(Txn txn, CancellationToken token)
+ {
+ var listener = new CommitListenerAdapter(token);
+ Commit(listener, txn);
+ return listener.Task;
+ }
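+
+ // Usage sketch (hypothetical application code; key and bin values are illustrative):
+ //
+ // Txn txn = new Txn();
+ // WritePolicy wp = new WritePolicy();
+ // wp.Txn = txn;
+ // await client.Put(wp, token, new Key("test", "demo", "k1"), new Bin("a", 1));
+ // await client.Commit(txn, token);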
+
+ /// <summary>
+ /// Asynchronously attempt to commit the given multi-record transaction. First, the expected
+ /// record versions are sent to the server nodes for verification. If all nodes return success,
+ /// the transaction is committed. Otherwise, the transaction is aborted.
+ /// <para>
+ /// Schedule the commit command with a channel selector and return.
+ /// Another thread will process the command and send the results to the listener.
+ /// </para>
+ /// <para>
+ /// Requires server version 8.0+
+ /// </para>
+ /// </summary>
+ /// <param name="listener">where to send results</param>
+ /// <param name="txn">multi-record transaction</param>
+ public void Commit(CommitListener listener, Txn txn)
+ {
+ AsyncTxnRoll atr = new(
+ cluster, txnVerifyPolicyDefault, txnRollPolicyDefault, txn
+ );
+
+ switch (txn.State)
+ {
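+ // An OPEN transaction must have its reads verified before the commit is applied.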
+ default:
+ case Txn.TxnState.OPEN:
+ atr.Verify(listener);
+ break;
+
+ case Txn.TxnState.VERIFIED:
+ atr.Commit(listener);
+ break;
+
+ case Txn.TxnState.COMMITTED:
+ listener.OnSuccess(CommitStatus.CommitStatusType.ALREADY_COMMITTED);
+ break;
+
+ case Txn.TxnState.ABORTED:
+ listener.OnSuccess(CommitStatus.CommitStatusType.ALREADY_ABORTED);
+ break;
+ }
+ }
+
+ /// <summary>
+ /// Asynchronously attempt to abort and rollback the given multi-record transaction.
+ /// Create listener, call asynchronous abort and return task monitor.
+ /// </summary>
+ /// <param name="txn">multi-record transaction</param>
+ /// <param name="token">cancellation token</param>
+ public Task<AbortStatus.AbortStatusType> Abort(Txn txn, CancellationToken token)
+ {
+ var listener = new AbortListenerAdapter(token);
+ Abort(listener, txn);
+ return listener.Task;
+ }
+
+ /// <summary>
+ /// Asynchronously abort and rollback the given multi-record transaction.
+ /// <para>
+ /// Schedule the abort command with a channel selector and return.
+ /// Another thread will process the command and send the results to the listener.
+ /// </para>
+ /// <para>
+ /// Requires server version 8.0+
+ /// </para>
+ /// </summary>
+ /// <param name="listener">where to send results</param>
+ /// <param name="txn">multi-record transaction</param>
+ public void Abort(AbortListener listener, Txn txn)
+ {
+ AsyncTxnRoll atr = new(cluster, null, txnRollPolicyDefault, txn);
+
+ switch (txn.State)
+ {
+ default:
+ case Txn.TxnState.OPEN:
+ case Txn.TxnState.VERIFIED:
+ atr.Abort(listener);
+ break;
+
+ case Txn.TxnState.COMMITTED:
+ listener.OnSuccess(AbortStatus.AbortStatusType.ALREADY_COMMITTED);
+ break;
+
+ case Txn.TxnState.ABORTED:
+ listener.OnSuccess(AbortStatus.AbortStatusType.ALREADY_ABORTED);
+ break;
+ }
+ }
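+
+ // Usage sketch (hypothetical): abort when any command in the transaction fails.
+ //
+ // try
+ // {
+ //     await client.Put(wp, token, key, bin);
+ //     await client.Commit(txn, token);
+ // }
+ // catch (AerospikeException)
+ // {
+ //     await client.Abort(txn, token);
+ // }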
+
//-------------------------------------------------------
// Write Record Operations
//-------------------------------------------------------
@@ -136,7 +244,7 @@ public AsyncClient(AsyncClientPolicy policy, params Host[] hosts)
/// Asynchronously write record bin(s).
/// Create listener, call asynchronous put and return task monitor.
///
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
+ /// The policy specifies the command timeout, record expiration and how the command is
/// handled when the record already exists.
///
///
@@ -151,13 +259,13 @@ public Task Put(WritePolicy policy, CancellationToken token, Key key, params Bin[] bins)
Put(policy, listener, key, bins);
return listener.Task;
}
-
+
///
/// Asynchronously write record bin(s).
/// Schedules the put command with a channel selector and return.
/// Another thread will process the command and send the results to the listener.
///
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
+ /// The policy specifies the command timeout, record expiration and how the command is
/// handled when the record already exists.
///
///
@@ -173,7 +281,7 @@ public void Put(WritePolicy policy, WriteListener listener, Key key, params Bin[] bins)
policy = writePolicyDefault;
}
AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.WRITE);
- async.Execute();
+ AsyncTxnMonitor.Execute(cluster, policy, async);
}
//-------------------------------------------------------
@@ -184,7 +292,7 @@ public void Put(WritePolicy policy, WriteListener listener, Key key, params Bin[] bins)
/// Asynchronously append bin string values to existing record bin values.
/// Create listener, call asynchronous append and return task monitor.
///
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
+ /// The policy specifies the command timeout, record expiration and how the command is
/// handled when the record already exists.
/// This call only works for string values.
///
@@ -206,7 +314,7 @@ public Task Append(WritePolicy policy, CancellationToken token, Key key, params Bin[] bins)
/// Schedule the append command with a channel selector and return.
/// Another thread will process the command and send the results to the listener.
///
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
+ /// The policy specifies the command timeout, record expiration and how the command is
/// handled when the record already exists.
/// This call only works for string values.
///
@@ -223,14 +331,14 @@ public void Append(WritePolicy policy, WriteListener listener, Key key, params Bin[] bins)
policy = writePolicyDefault;
}
AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.APPEND);
- async.Execute();
+ AsyncTxnMonitor.Execute(cluster, policy, async);
}
///
/// Asynchronously prepend bin string values to existing record bin values.
/// Create listener, call asynchronous prepend and return task monitor.
///
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
+ /// The policy specifies the command timeout, record expiration and how the command is
/// handled when the record already exists.
/// This call works only for string values.
///
@@ -252,7 +360,7 @@ public Task Prepend(WritePolicy policy, CancellationToken token, Key key, params Bin[] bins)
/// Schedule the prepend command with a channel selector and return.
/// Another thread will process the command and send the results to the listener.
///
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
+ /// The policy specifies the command timeout, record expiration and how the command is
/// handled when the record already exists.
/// This call works only for string values.
///
@@ -269,7 +377,7 @@ public void Prepend(WritePolicy policy, WriteListener listener, Key key, params Bin[] bins)
policy = writePolicyDefault;
}
AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.PREPEND);
- async.Execute();
+ AsyncTxnMonitor.Execute(cluster, policy, async);
}
//-------------------------------------------------------
@@ -280,7 +388,7 @@ public void Prepend(WritePolicy policy, WriteListener listener, Key key, params Bin[] bins)
/// Asynchronously add integer/double bin values to existing record bin values.
/// Create listener, call asynchronous add and return task monitor.
///
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
+ /// The policy specifies the command timeout, record expiration and how the command is
/// handled when the record already exists.
///
///
@@ -301,7 +409,7 @@ public Task Add(WritePolicy policy, CancellationToken token, Key key, params Bin[] bins)
/// Schedule the add command with a channel selector and return.
/// Another thread will process the command and send the results to the listener.
///
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
+ /// The policy specifies the command timeout, record expiration and how the command is
/// handled when the record already exists.
///
///
@@ -317,7 +425,7 @@ public void Add(WritePolicy policy, WriteListener listener, Key key, params Bin[] bins)
policy = writePolicyDefault;
}
AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.ADD);
- async.Execute();
+ AsyncTxnMonitor.Execute(cluster, policy, async);
}
//-------------------------------------------------------
@@ -355,7 +463,7 @@ public void Delete(WritePolicy policy, DeleteListener listener, Key key)
policy = writePolicyDefault;
}
AsyncDelete async = new AsyncDelete(cluster, policy, key, listener);
- async.Execute();
+ AsyncTxnMonitor.Execute(cluster, policy, async);
}
///
@@ -411,7 +519,8 @@ public void Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, BatchRecordArrayListener listener, Key[] keys)
BatchAttr attr = new BatchAttr();
attr.SetDelete(deletePolicy);
- new AsyncBatchOperateRecordArrayExecutor(cluster, batchPolicy, listener, keys, null, attr);
+ AsyncBatchOperateRecordArrayExecutor executor = new(cluster, batchPolicy, listener, keys, null, attr);
+ AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys);
}
///
@@ -451,7 +560,8 @@ public void Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, BatchRecordSequenceListener listener, Key[] keys)
BatchAttr attr = new BatchAttr();
attr.SetDelete(deletePolicy);
- new AsyncBatchOperateRecordSequenceExecutor(cluster, batchPolicy, listener, keys, null, attr);
+ AsyncBatchOperateRecordSequenceExecutor executor = new(cluster, batchPolicy, listener, keys, null, attr);
+ AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys);
}
//-------------------------------------------------------
@@ -491,7 +601,7 @@ public void Touch(WritePolicy policy, WriteListener listener, Key key)
policy = writePolicyDefault;
}
AsyncTouch async = new AsyncTouch(cluster, policy, listener, key);
- async.Execute();
+ AsyncTxnMonitor.Execute(cluster, policy, async);
}
///
@@ -531,7 +641,7 @@ public void Touched(WritePolicy policy, ExistsListener listener, Key key)
policy = writePolicyDefault;
}
AsyncTouch async = new(cluster, policy, listener, key);
- async.Execute();
+ AsyncTxnMonitor.Execute(cluster, policy, async);
}
//-------------------------------------------------------
@@ -568,6 +678,9 @@ public void Exists(Policy policy, ExistsListener listener, Key key)
{
policy = readPolicyDefault;
}
+
+ policy.Txn?.PrepareRead(key.ns);
+
AsyncExists async = new AsyncExists(cluster, policy, key, listener);
async.Execute();
}
@@ -607,7 +720,10 @@ public void Exists(BatchPolicy policy, ExistsArrayListener listener, Key[] keys)
{
policy = batchPolicyDefault;
}
- new AsyncBatchExistsArrayExecutor(cluster, policy, keys, listener);
+ policy.Txn?.PrepareRead(keys);
+
+ AsyncBatchExistsArrayExecutor executor = new(cluster, policy, keys, listener);
+ executor.Execute();
}
///
@@ -630,7 +746,10 @@ public void Exists(BatchPolicy policy, ExistsSequenceListener listener, Key[] keys)
{
policy = batchPolicyDefault;
}
- new AsyncBatchExistsSequenceExecutor(cluster, policy, keys, listener);
+ policy.Txn?.PrepareRead(keys);
+
+ AsyncBatchExistsSequenceExecutor executor = new(cluster, policy, keys, listener);
+ executor.Execute();
}
//-------------------------------------------------------
@@ -667,6 +786,9 @@ public void Get(Policy policy, RecordListener listener, Key key)
{
policy = readPolicyDefault;
}
+
+ policy.Txn?.PrepareRead(key.ns);
+
AsyncRead async = new AsyncRead(cluster, policy, listener, key, (string[])null);
async.Execute();
}
@@ -703,6 +825,9 @@ public void Get(Policy policy, RecordListener listener, Key key, params string[] binNames)
{
policy = readPolicyDefault;
}
+
+ policy.Txn?.PrepareRead(key.ns);
+
AsyncRead async = new AsyncRead(cluster, policy, listener, key, binNames);
async.Execute();
}
@@ -737,6 +862,9 @@ public void GetHeader(Policy policy, RecordListener listener, Key key)
{
policy = readPolicyDefault;
}
+
+ policy.Txn?.PrepareRead(key.ns);
+
AsyncReadHeader async = new AsyncReadHeader(cluster, policy, listener, key);
async.Execute();
}
@@ -789,7 +917,10 @@ public void Get(BatchPolicy policy, BatchListListener listener, List<BatchRead> records)
{
policy = batchPolicyDefault;
}
- new AsyncBatchReadListExecutor(cluster, policy, listener, records);
+ policy.Txn?.PrepareRead(records);
+
+ AsyncBatchReadListExecutor executor = new(cluster, policy, listener, records);
+ executor.Execute();
}
///
@@ -817,7 +948,10 @@ public void Get(BatchPolicy policy, BatchSequenceListener listener, List<BatchRead> records)
@@ -861,7 +995,10 @@ public void Get(BatchPolicy policy, RecordArrayListener listener, Key[] keys)
{
policy = batchPolicyDefault;
}
- new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false);
+ policy.Txn?.PrepareRead(keys);
+
+ AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false);
+ executor.Execute();
}
///
@@ -887,7 +1024,10 @@ public void Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys)
{
policy = batchPolicyDefault;
}
- new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false);
+ policy.Txn?.PrepareRead(keys);
+
+ AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false);
+ executor.Execute();
}
///
@@ -933,7 +1073,10 @@ public void Get(BatchPolicy policy, RecordArrayListener listener, Key[] keys, params string[] binNames)
{
policy = batchPolicyDefault;
}
- new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false);
+ policy.Txn?.PrepareRead(keys);
+
+ AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false);
+ executor.Execute();
}
///
@@ -960,7 +1103,13 @@ public void Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys, params string[] binNames)
{
policy = batchPolicyDefault;
}
- new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false);
+ policy.Txn?.PrepareRead(keys);
+
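+ // No bin names means return all bins; otherwise read only the named bins.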
+ int readAttr = (binNames == null || binNames.Length == 0) ?
+ Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ;
+
+ AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, binNames, null, readAttr, false);
+ executor.Execute();
}
///
@@ -1008,7 +1157,11 @@ public void Get(BatchPolicy policy, RecordArrayListener listener, Key[] keys, params Operation[] ops)
{
policy = batchPolicyDefault;
}
- new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true);
+
+ policy.Txn?.PrepareRead(keys);
+
+ AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true);
+ executor.Execute();
}
///
@@ -1037,7 +1190,11 @@ public void Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys, params Operation[] ops)
{
policy = batchPolicyDefault;
}
- new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true);
+
+ policy.Txn?.PrepareRead(keys);
+
+ AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true);
+ executor.Execute();
}
///
@@ -1081,7 +1238,10 @@ public void GetHeader(BatchPolicy policy, RecordArrayListener listener, Key[] keys)
{
policy = batchPolicyDefault;
}
- new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false);
+ policy.Txn?.PrepareRead(keys);
+
+ AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false);
+ executor.Execute();
}
///
@@ -1107,7 +1267,10 @@ public void GetHeader(BatchPolicy policy, RecordSequenceListener listener, Key[] keys)
{
policy = batchPolicyDefault;
}
- new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false);
+ policy.Txn?.PrepareRead(keys);
+
+ AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false);
+ executor.Execute();
}
//-------------------------------------------------------
@@ -1155,9 +1318,21 @@ public Task<Record> Operate(WritePolicy policy, CancellationToken token, Key key, params Operation[] ops)
/// if queue is full
public void Operate(WritePolicy policy, RecordListener listener, Key key, params Operation[] ops)
{
- OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, key, ops);
- AsyncOperate async = new AsyncOperate(cluster, listener, key, args);
- async.Execute();
+ OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, ops);
+ policy = args.writePolicy;
+
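+ // Writes are routed through the transaction monitor so the key is
+ // registered with the Txn first; reads only record the key's namespace.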
+ if (args.hasWrite)
+ {
+ AsyncOperateWrite async = new(cluster, listener, key, args);
+ AsyncTxnMonitor.Execute(cluster, policy, async);
+ }
+ else
+ {
+ policy.Txn?.PrepareRead(key.ns);
+
+ AsyncOperateRead async = new(cluster, listener, key, args);
+ async.Execute();
+ }
}
//-------------------------------------------------------
@@ -1211,7 +1386,8 @@ public void Operate(BatchPolicy policy, BatchOperateListListener listener, List<BatchRecord> records)
{
policy = batchParentPolicyWriteDefault;
}
- new AsyncBatchOperateListExecutor(cluster, policy, listener, records);
+ AsyncBatchOperateListExecutor executor = new(cluster, policy, listener, records);
+ AsyncTxnMonitor.ExecuteBatch(policy, executor, records);
}
///
@@ -1245,7 +1421,8 @@ public void Operate(BatchPolicy policy, BatchRecordSequenceListener listener, Li
{
policy = batchParentPolicyWriteDefault;
}
- new AsyncBatchOperateSequenceExecutor(cluster, policy, listener, records);
+ AsyncBatchOperateSequenceExecutor executor = new(cluster, policy, listener, records);
+ AsyncTxnMonitor.ExecuteBatch(policy, executor, records);
}
///
@@ -1309,7 +1486,8 @@ public void Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, BatchRecordArrayListener listener, Key[] keys, params Operation[] ops)
}
BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops);
- new AsyncBatchOperateRecordArrayExecutor(cluster, batchPolicy, listener, keys, ops, attr);
+ AsyncBatchOperateRecordArrayExecutor executor = new(cluster, batchPolicy, listener, keys, ops, attr);
+ AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys);
}
///
@@ -1352,7 +1530,8 @@ public void Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, BatchRecordSequenceListener listener, Key[] keys, params Operation[] ops)
}
BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops);
- new AsyncBatchOperateRecordSequenceExecutor(cluster, batchPolicy, listener, keys, ops, attr);
+ AsyncBatchOperateRecordSequenceExecutor executor = new(cluster, batchPolicy, listener, keys, ops, attr);
+ AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys);
}
//-------------------------------------------------------
@@ -1454,7 +1633,7 @@ public Task
diff --git a/AerospikeClient/Async/IAsyncClient.cs b/AerospikeClient/Async/IAsyncClient.cs
@@ -189,7 +235,7 @@ public interface IAsyncClient : IAerospikeClient
/// Asynchronously delete record for specified key.
/// Create listener, call asynchronous delete and return task monitor.
///
- /// The policy specifies the transaction timeout.
+ /// The policy specifies the command timeout.
///
///
/// delete configuration parameters, pass in null for defaults
@@ -880,7 +926,7 @@ public interface IAsyncClient : IAerospikeClient
/// server package name where user defined function resides
/// user defined function
/// arguments passed in to user defined function
- /// if transaction fails
+ /// if command fails
void Execute(WritePolicy policy, ExecuteListener listener, Key key, string packageName, string functionName, params Value[] functionArgs);
///
diff --git a/AerospikeClient/AsyncTask/AbortListenerAdapter.cs b/AerospikeClient/AsyncTask/AbortListenerAdapter.cs
new file mode 100644
index 00000000..ce3de111
--- /dev/null
+++ b/AerospikeClient/AsyncTask/AbortListenerAdapter.cs
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+using static Aerospike.Client.AbortStatus;
+
+namespace Aerospike.Client
+{
+ internal sealed class AbortListenerAdapter : ListenerAdapter<AbortStatusType>, AbortListener
+ {
+ public AbortListenerAdapter(CancellationToken token)
+ : base(token)
+ {
+ }
+
+ public void OnSuccess(AbortStatusType status)
+ {
+ SetResult(status);
+ }
+ }
+}
diff --git a/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs b/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs
index 6b2616fc..44c295f5 100644
--- a/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs
+++ b/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs
@@ -29,7 +29,7 @@ public BatchOperateListListenerAdapter(CancellationToken token)
public void OnSuccess(List<BatchRecord> records, bool status)
{
// records is an argument to the async call, so the user already has access to it.
- // Set completion status: true if all batch sub-transactions were successful.
+ // Set completion status: true if all batch sub-commands were successful.
SetResult(status);
}
}
diff --git a/AerospikeClient/AsyncTask/CommitListenerAdapter.cs b/AerospikeClient/AsyncTask/CommitListenerAdapter.cs
new file mode 100644
index 00000000..11c5c46c
--- /dev/null
+++ b/AerospikeClient/AsyncTask/CommitListenerAdapter.cs
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+using static Aerospike.Client.CommitStatus;
+
+namespace Aerospike.Client
+{
+ internal sealed class CommitListenerAdapter : ListenerAdapter<CommitStatusType>, CommitListener
+ {
+ public CommitListenerAdapter(CancellationToken token)
+ : base(token)
+ {
+ }
+
+ public void OnSuccess(CommitStatusType status)
+ {
+ SetResult(status);
+ }
+
+ public void OnFailure(AerospikeException.Commit exception)
+ {
+ base.OnFailure(exception);
+ }
+ }
+}
diff --git a/AerospikeClient/Cluster/Cluster.cs b/AerospikeClient/Cluster/Cluster.cs
index c6f35f6a..190c6b78 100644
--- a/AerospikeClient/Cluster/Cluster.cs
+++ b/AerospikeClient/Cluster/Cluster.cs
@@ -14,10 +14,7 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
-using System;
-using System.Collections.Generic;
using System.Text;
-using System.Threading;
namespace Aerospike.Client
{
@@ -97,7 +94,7 @@ public class Cluster
// Login timeout.
protected internal readonly int loginTimeout;
- // Maximum socket idle to validate connections in transactions.
+ // Maximum socket idle to validate connections in commands.
private readonly double maxSocketIdleMillisTran;
// Maximum socket idle to trim peak connections to min connections.
@@ -137,7 +134,7 @@ public class Cluster
public MetricsPolicy MetricsPolicy;
private volatile IMetricsListener metricsListener;
private volatile int retryCount;
- private volatile int tranCount;
+ private volatile int commandCount;
private volatile int delayQueueTimeoutCount;
public Cluster(ClientPolicy policy, Host[] hosts)
@@ -274,6 +271,74 @@ public Cluster(ClientPolicy policy, Host[] hosts)
cancelToken = cancel.Token;
}
+ public void StartTendThread(ClientPolicy policy)
+ {
+ if (policy.forceSingleNode)
+ {
+ // Communicate with the first seed node only.
+ // Do not run cluster tend thread.
+ try
+ {
+ ForceSingleNode();
+ }
+ catch (Exception)
+ {
+ Close();
+ throw;
+ }
+ }
+ else
+ {
+ InitTendThread(policy.failIfNotConnected);
+ }
+ }
+
+ public void ForceSingleNode()
+ {
+ // Initialize tendThread, but do not start it.
+ tendValid = true;
+ tendThread = new Thread(new ThreadStart(this.Run));
+
+ // Validate first seed.
+ Host seed = seeds[0];
+ NodeValidator nv = new();
+ Node node = null;
+
+ try
+ {
+ node = nv.SeedNode(this, seed, null);
+ }
+ catch (Exception e)
+ {
+ throw new AerospikeException("Seed " + seed + " failed: " + e.Message, e);
+ }
+
+ node.CreateMinConnections();
+
+ // Add seed node to nodes.
+ Dictionary<string, Node> nodesToAdd = new(1);
+ nodesToAdd[node.Name] = node;
+ AddNodes(nodesToAdd);
+
+ // Initialize partitionMaps.
+ Peers peers = new(nodes.Length + 16);
+ node.RefreshPartitions(peers);
+
+ // Set partition maps for all namespaces to point to same node.
+ foreach (Partitions partitions in partitionMap.Values)
+ {
+ foreach (Node[] nodeArray in partitions.replicas)
+ {
+ int max = nodeArray.Length;
+
+ for (int i = 0; i < max; i++)
+ {
+ nodeArray[i] = node;
+ }
+ }
+ }
+ }
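+
+ // Usage sketch (hypothetical): pin the client to a single node, e.g. when a
+ // load balancer fronts the cluster.
+ //
+ // ClientPolicy policy = new ClientPolicy();
+ // policy.forceSingleNode = true;
+ // AerospikeClient client = new AerospikeClient(policy, new Host("lb.example.com", 3000));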
+
public virtual void InitTendThread(bool failIfNotConnected)
{
// Tend cluster until all nodes identified.
@@ -1201,26 +1266,26 @@ private static bool SupportsPartitionQuery(Node[] nodes)
}
///
- /// Increment transaction count when metrics are enabled.
+ /// Increment command count when metrics are enabled.
///
- public void AddTran()
+ public void AddCommandCount()
{
if (MetricsEnabled)
{
- Interlocked.Increment(ref tranCount);
+ Interlocked.Increment(ref commandCount);
}
}
///
- /// Return transaction count. The value is cumulative and not reset per metrics interval.
+ /// Return command count. The value is cumulative and not reset per metrics interval.
///
- public int GetTranCount()
+ public int GetCommandCount()
{
- return tranCount;
+ return commandCount;
}
///
- /// Increment transaction retry count. There can be multiple retries for a single transaction.
+ /// Increment command retry count. There can be multiple retries for a single command.
///
public void AddRetry()
{
@@ -1228,7 +1293,7 @@ public void AddRetry()
}
///
- /// Add transaction retry count. There can be multiple retries for a single transaction.
+ /// Add command retry count. There can be multiple retries for a single command.
///
public void AddRetries(int count)
{
@@ -1236,7 +1301,7 @@ public void AddRetries(int count)
}
///
- /// Return transaction retry count. The value is cumulative and not reset per metrics interval.
+ /// Return command retry count. The value is cumulative and not reset per metrics interval.
///
public int GetRetryCount()
{
diff --git a/AerospikeClient/Cluster/ClusterStats.cs b/AerospikeClient/Cluster/ClusterStats.cs
index 0196fdf3..7e7f855f 100644
--- a/AerospikeClient/Cluster/ClusterStats.cs
+++ b/AerospikeClient/Cluster/ClusterStats.cs
@@ -51,7 +51,7 @@ public sealed class ClusterStats
public readonly int invalidNodeCount;
///
- /// Count of transaction retires since cluster was started.
+ /// Count of command retries since cluster was started.
///
public readonly long RetryCount;
@@ -126,14 +126,14 @@ public sealed class NodeStats
public readonly ConnectionStats asyncStats;
///
- /// Transaction error count since node was initialized. If the error is retryable, multiple errors per
- /// transaction may occur.
+ /// Command error count since node was initialized. If the error is retryable, multiple errors per
+ /// command may occur.
///
public readonly long ErrorCount;
///
- /// Transaction timeout count since node was initialized. If the timeout is retryable (ie socketTimeout),
- /// multiple timeouts per transaction may occur.
+ /// Command timeout count since node was initialized. If the timeout is retryable (ie socketTimeout),
+ /// multiple timeouts per command may occur.
///
public readonly long TimeoutCount;
diff --git a/AerospikeClient/Cluster/ConnectionRecover.cs b/AerospikeClient/Cluster/ConnectionRecover.cs
index 4127e1cc..5d1ffe1e 100644
--- a/AerospikeClient/Cluster/ConnectionRecover.cs
+++ b/AerospikeClient/Cluster/ConnectionRecover.cs
@@ -284,7 +284,7 @@ private void ParseProto(byte[] buf, int bytesRead)
if (compressed)
{
// Do not recover connections with compressed data because that would
- // require saving large buffers with associated state and performing decompression
+ // require saving large buffers with associated state and performing decompression
// just to drain the connection.
throw new AerospikeException("Recovering connections with compressed multi-record data is not supported");
}
diff --git a/AerospikeClient/Cluster/Node.cs b/AerospikeClient/Cluster/Node.cs
index 2ac8712a..d9887d30 100644
--- a/AerospikeClient/Cluster/Node.cs
+++ b/AerospikeClient/Cluster/Node.cs
@@ -740,7 +740,7 @@ public Connection GetConnection(int timeoutMillis, int timeoutDelay)
{
if (timeoutDelay > 0)
{
- // The connection state is always STATE_READ_AUTH_HEADER here which does not reference
+ // The connection state is always STATE_READ_AUTH_HEADER here which does not reference
// isSingle, so just pass in true for isSingle in ConnectionRecover.
cluster.RecoverConnection(new ConnectionRecover(conn, this, timeoutDelay, crt, true));
conn = null;
@@ -993,8 +993,8 @@ public void ValidateErrorCount()
}
///
- /// Increment transaction error count. If the error is retryable, multiple errors per
- /// transaction may occur.
+ /// Increment command error count. If the error is retryable, multiple errors per
+ /// command may occur.
///
public void AddError()
@@ -1003,8 +1003,8 @@ public void AddError()
}
///
- /// Increment transaction timeout count. If the timeout is retryable (ie socketTimeout),
- /// multiple timeouts per transaction may occur.
+ /// Increment command timeout count. If the timeout is retryable (ie socketTimeout),
+ /// multiple timeouts per command may occur.
///
public void AddTimeout()
{
@@ -1012,7 +1012,7 @@ public void AddTimeout()
}
///
- /// Return transaction error count. The value is cumulative and not reset per metrics interval.
+ /// Return command error count. The value is cumulative and not reset per metrics interval.
///
public int GetErrorCount()
{
@@ -1020,7 +1020,7 @@ public int GetErrorCount()
}
///
- /// Return transaction timeout count. The value is cumulative and not reset per metrics interval.
+ /// Return command timeout count. The value is cumulative and not reset per metrics interval.
///
public int GetTimeoutCount()
{
diff --git a/AerospikeClient/Cluster/NodeValidator.cs b/AerospikeClient/Cluster/NodeValidator.cs
index cf398ca7..d5599f61 100644
--- a/AerospikeClient/Cluster/NodeValidator.cs
+++ b/AerospikeClient/Cluster/NodeValidator.cs
@@ -100,6 +100,11 @@ public Node SeedNode(Cluster cluster, Host host, Peers peers)
private bool ValidatePeers(Peers peers, Node node)
{
+ if (peers == null)
+ {
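+ // Peers is null when seeding via ForceSingleNode(), which skips peer validation.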
+ return true;
+ }
+
try
{
peers.refreshCount = 0;
diff --git a/AerospikeClient/Command/Batch.cs b/AerospikeClient/Command/Batch.cs
index 4ecbb5c8..ac7d7b0a 100644
--- a/AerospikeClient/Command/Batch.cs
+++ b/AerospikeClient/Command/Batch.cs
@@ -1,657 +1,842 @@
-/*
- * Copyright 2012-2024 Aerospike, Inc.
- *
- * Portions may be licensed to Aerospike, Inc. under one or more contributor
- * license agreements.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-using System.Collections;
-
-namespace Aerospike.Client
-{
- //-------------------------------------------------------
- // ReadList
- //-------------------------------------------------------
-
- public sealed class BatchReadListCommand : BatchCommand
- {
- private readonly List<BatchRead> records;
-
- public BatchReadListCommand
- (
- Cluster cluster,
- BatchNode batch,
- BatchPolicy policy,
- List<BatchRead> records,
- BatchStatus status
- ) : base(cluster, batch, policy, status, true)
- {
- this.records = records;
- }
-
- protected internal override void WriteBuffer()
- {
- if (batch.node != null && batch.node.HasBatchAny)
- {
- SetBatchOperate(batchPolicy, records, batch);
- }
- else
- {
- SetBatchRead(batchPolicy, records, batch);
- }
- }
-
- protected internal override bool ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRead record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- }
- else
- {
- record.SetError(resultCode, false);
- status.SetRowError();
- }
- return true;
- }
-
- protected internal override BatchCommand CreateCommand(BatchNode batchNode)
- {
- return new BatchReadListCommand(cluster, batchNode, batchPolicy, records, status);
- }
-
- protected internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, status);
- }
- }
-
- //-------------------------------------------------------
- // GetArray
- //-------------------------------------------------------
-
- public sealed class BatchGetArrayCommand : BatchCommand
- {
- private readonly Key[] keys;
- private readonly string[] binNames;
- private readonly Operation[] ops;
- private readonly Record[] records;
- private readonly int readAttr;
-
- public BatchGetArrayCommand
- (
- Cluster cluster,
- BatchNode batch,
- BatchPolicy policy,
- Key[] keys,
- string[] binNames,
- Operation[] ops,
- Record[] records,
- int readAttr,
- bool isOperation,
- BatchStatus status
- ) : base(cluster, batch, policy, status, isOperation)
- {
- this.keys = keys;
- this.binNames = binNames;
- this.ops = ops;
- this.records = records;
- this.readAttr = readAttr;
- }
-
- protected internal override void WriteBuffer()
- {
- if (batch.node != null && batch.node.HasBatchAny)
- {
- BatchAttr attr = new BatchAttr(policy, readAttr, ops);
- SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr);
- }
- else
- {
- SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr);
- }
- }
-
- protected internal override bool ParseRow()
- {
- SkipKey(fieldCount);
-
- if (resultCode == 0)
- {
- records[batchIndex] = ParseRecord();
- }
- return true;
- }
-
- protected internal override BatchCommand CreateCommand(BatchNode batchNode)
- {
- return new BatchGetArrayCommand(cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation, status);
- }
-
- protected internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status);
- }
- }
-
- //-------------------------------------------------------
- // ExistsArray
- //-------------------------------------------------------
-
- public sealed class BatchExistsArrayCommand : BatchCommand
- {
- private readonly Key[] keys;
- private readonly bool[] existsArray;
-
- public BatchExistsArrayCommand
- (
- Cluster cluster,
- BatchNode batch,
- BatchPolicy policy,
- Key[] keys,
- bool[] existsArray,
- BatchStatus status
- ) : base(cluster, batch, policy, status, false)
- {
- this.keys = keys;
- this.existsArray = existsArray;
- }
-
- protected internal override void WriteBuffer()
- {
- if (batch.node != null && batch.node.HasBatchAny)
- {
- BatchAttr attr = new BatchAttr(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA);
- SetBatchOperate(batchPolicy, keys, batch, null, null, attr);
- }
- else
- {
- SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA);
- }
- }
-
- protected internal override bool ParseRow()
- {
- SkipKey(fieldCount);
-
- if (opCount > 0)
- {
- throw new AerospikeException.Parse("Received bins that were not requested!");
- }
-
- existsArray[batchIndex] = resultCode == 0;
- return true;
- }
-
- protected internal override BatchCommand CreateCommand(BatchNode batchNode)
- {
- return new BatchExistsArrayCommand(cluster, batchNode, batchPolicy, keys, existsArray, status);
- }
-
- protected internal override List<BatchNode> GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status);
- }
- }
-
- //-------------------------------------------------------
- // OperateList
- //-------------------------------------------------------
-
- public sealed class BatchOperateListCommand : BatchCommand
- {
- private readonly IList<BatchRecord> records;
-
- public BatchOperateListCommand
- (
- Cluster cluster,
- BatchNode batch,
- BatchPolicy policy,
- IList<BatchRecord> records,
- BatchStatus status
- ) : base(cluster, batch, policy, status, true)
- {
- this.records = records;
- }
-
- protected internal override bool IsWrite()
- {
- // This method is only called to set inDoubt on node level errors.
- // SetError() will filter out reads when setting record level inDoubt.
- return true;
- }
-
- protected internal override void WriteBuffer()
- {
- SetBatchOperate(batchPolicy, (IList)records, batch);
- }
-
- protected internal override bool ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRecord record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- return true;
- }
-
- if (resultCode == ResultCode.UDF_BAD_RESPONSE)
- {
- Record r = ParseRecord();
- string m = r.GetString("FAILURE");
-
- if (m != null)
- {
- // Need to store record because failure bin contains an error message.
- record.record = r;
- record.resultCode = resultCode;
- record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter);
- status.SetRowError();
- return true;
- }
- }
-
- record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter));
- status.SetRowError();
- return true;
- }
-
- protected internal override void SetInDoubt(bool inDoubt)
- {
- if (!inDoubt)
- {
- return;
- }
-
- foreach (int index in batch.offsets)
- {
- BatchRecord record = records[index];
-
- if (record.resultCode == ResultCode.NO_RESPONSE)
- {
- record.inDoubt = record.hasWrite;
- }
- }
- }
-
- protected internal override BatchCommand CreateCommand(BatchNode batchNode)
- {
- return new BatchOperateListCommand(cluster, batchNode, batchPolicy, records, status);
- }
-
- protected internal override List GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, (IList)records, sequenceAP, sequenceSC, batch, status);
- }
- }
-
- //-------------------------------------------------------
- // OperateArray
- //-------------------------------------------------------
-
- public sealed class BatchOperateArrayCommand : BatchCommand
- {
- private readonly Key[] keys;
- private readonly Operation[] ops;
- private readonly BatchRecord[] records;
- private readonly BatchAttr attr;
-
- public BatchOperateArrayCommand
- (
- Cluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- Operation[] ops,
- BatchRecord[] records,
- BatchAttr attr,
- BatchStatus status
- ) : base(cluster, batch, batchPolicy, status, ops != null)
- {
- this.keys = keys;
- this.ops = ops;
- this.records = records;
- this.attr = attr;
- }
-
- protected internal override bool IsWrite()
- {
- return attr.hasWrite;
- }
-
- protected internal override void WriteBuffer()
- {
- SetBatchOperate(batchPolicy, keys, batch, null, ops, attr);
- }
-
- protected internal override bool ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRecord record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- }
- else
- {
- record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
- status.SetRowError();
- }
- return true;
- }
-
- protected internal override void SetInDoubt(bool inDoubt)
- {
- if (!inDoubt || !attr.hasWrite)
- {
- return;
- }
-
- foreach (int index in batch.offsets)
- {
- BatchRecord record = records[index];
-
- if (record.resultCode == ResultCode.NO_RESPONSE)
- {
- record.inDoubt = inDoubt;
- }
- }
- }
-
- protected internal override BatchCommand CreateCommand(BatchNode batchNode)
- {
- return new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status);
- }
-
- protected internal override List GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status);
- }
- }
-
- //-------------------------------------------------------
- // UDF
- //-------------------------------------------------------
-
- public sealed class BatchUDFCommand : BatchCommand
- {
- private readonly Key[] keys;
- private readonly string packageName;
- private readonly string functionName;
- private readonly byte[] argBytes;
- private readonly BatchRecord[] records;
- private readonly BatchAttr attr;
-
- public BatchUDFCommand
- (
- Cluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- Key[] keys,
- string packageName,
- string functionName,
- byte[] argBytes,
- BatchRecord[] records,
- BatchAttr attr,
- BatchStatus status
- ) : base(cluster, batch, batchPolicy, status, false)
- {
- this.keys = keys;
- this.packageName = packageName;
- this.functionName = functionName;
- this.argBytes = argBytes;
- this.records = records;
- this.attr = attr;
- }
-
- protected internal override bool IsWrite()
- {
- return attr.hasWrite;
- }
-
- protected internal override void WriteBuffer()
- {
- SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr);
- }
-
- protected internal override bool ParseRow()
- {
- SkipKey(fieldCount);
-
- BatchRecord record = records[batchIndex];
-
- if (resultCode == 0)
- {
- record.SetRecord(ParseRecord());
- return true;
- }
-
- if (resultCode == ResultCode.UDF_BAD_RESPONSE)
- {
- Record r = ParseRecord();
- string m = r.GetString("FAILURE");
-
- if (m != null)
- {
- // Need to store record because failure bin contains an error message.
- record.record = r;
- record.resultCode = resultCode;
- record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter);
- status.SetRowError();
- return true;
- }
- }
-
- record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
- status.SetRowError();
- return true;
- }
-
- protected internal override void SetInDoubt(bool inDoubt)
- {
- if (!inDoubt || !attr.hasWrite)
- {
- return;
- }
-
- foreach (int index in batch.offsets)
- {
- BatchRecord record = records[index];
-
- if (record.resultCode == ResultCode.NO_RESPONSE)
- {
- record.inDoubt = inDoubt;
- }
- }
- }
-
- protected internal override BatchCommand CreateCommand(BatchNode batchNode)
- {
- return new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status);
- }
-
- protected internal override List GenerateBatchNodes()
- {
- return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status);
- }
- }
-
- //-------------------------------------------------------
- // Batch Base Command
- //-------------------------------------------------------
-
- public abstract class BatchCommand : MultiCommand
- {
- internal readonly BatchNode batch;
- internal readonly BatchPolicy batchPolicy;
- internal readonly BatchStatus status;
- internal BatchExecutor parent;
- internal uint sequenceAP;
- internal uint sequenceSC;
- internal bool splitRetry;
-
- public BatchCommand
- (
- Cluster cluster,
- BatchNode batch,
- BatchPolicy batchPolicy,
- BatchStatus status,
- bool isOperation
- ) : base(cluster, batchPolicy, batch.node, isOperation)
- {
- this.batch = batch;
- this.batchPolicy = batchPolicy;
- this.status = status;
- }
-
- public void Run(object obj)
- {
- try
- {
- Execute();
- }
- catch (AerospikeException ae)
- {
- // Set error/inDoubt for keys associated this batch command when
- // the command was not retried and split. If a split retry occurred,
- // those new subcommands have already set error/inDoubt on the affected
- // subset of keys.
- if (!splitRetry)
- {
- SetInDoubt(ae.InDoubt);
- }
- status.SetException(ae);
- }
- catch (Exception e)
- {
- if (!splitRetry)
- {
- SetInDoubt(true);
- }
- status.SetException(e);
- }
- finally
- {
- parent.OnComplete();
- }
- }
-
- protected override Latency.LatencyType GetLatencyType()
- {
- return Latency.LatencyType.BATCH;
- }
-
- protected internal override bool PrepareRetry(bool timeout)
- {
- if (!((batchPolicy.replica == Replica.SEQUENCE || batchPolicy.replica == Replica.PREFER_RACK) &&
- (parent == null || !parent.IsDone())))
- {
- // Perform regular retry to same node.
- return true;
- }
- sequenceAP++;
-
- if (!timeout || batchPolicy.readModeSC != ReadModeSC.LINEARIZE)
- {
- sequenceSC++;
- }
- return false;
- }
-
- protected internal override bool RetryBatch
- (
- Cluster cluster,
- int socketTimeout,
- int totalTimeout,
- DateTime deadline,
- int iteration,
- int commandSentCounter
- )
- {
- // Retry requires keys for this node to be split among other nodes.
- // This is both recursive and exponential.
- List<BatchNode> batchNodes = GenerateBatchNodes();
-
- if (batchNodes.Count == 1 && batchNodes[0].node == batch.node)
- {
- // Batch node is the same. Go through normal retry.
- return false;
- }
-
- splitRetry = true;
-
- // Run batch requests sequentially in same thread.
- foreach (BatchNode batchNode in batchNodes)
- {
- BatchCommand command = CreateCommand(batchNode);
- command.parent = parent;
- command.sequenceAP = sequenceAP;
- command.sequenceSC = sequenceSC;
- command.socketTimeout = socketTimeout;
- command.totalTimeout = totalTimeout;
- command.iteration = iteration;
- command.commandSentCounter = commandSentCounter;
- command.deadline = deadline;
-
- try
- {
- cluster.AddRetry();
- command.ExecuteCommand();
- }
- catch (AerospikeException ae)
- {
- if (!command.splitRetry)
- {
- command.SetInDoubt(ae.InDoubt);
- }
- status.SetException(ae);
-
- if (!batchPolicy.respondAllKeys)
- {
- throw;
- }
- }
- catch (Exception e)
- {
- if (!command.splitRetry)
- {
- command.SetInDoubt(true);
- }
- status.SetException(e);
-
- if (!batchPolicy.respondAllKeys)
- {
- throw;
- }
- }
- }
- return true;
- }
-
- protected internal virtual void SetInDoubt(bool inDoubt)
- {
- // Do nothing by default. Batch writes will override this method.
- }
-
- protected internal abstract BatchCommand CreateCommand(BatchNode batchNode);
- protected internal abstract List<BatchNode> GenerateBatchNodes();
- }
-}
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+using System.Collections;
+
+namespace Aerospike.Client
+{
+ //-------------------------------------------------------
+ // ReadList
+ //-------------------------------------------------------
+
+ public sealed class BatchReadListCommand : BatchCommand
+ {
+ private readonly List<BatchRead> records;
+
+ public BatchReadListCommand
+ (
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy policy,
+ List<BatchRead> records,
+ BatchStatus status
+ ) : base(cluster, batch, policy, status, true)
+ {
+ this.records = records;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ if (batch.node != null && batch.node.HasBatchAny)
+ {
+ SetBatchOperate(batchPolicy, records, batch);
+ }
+ else
+ {
+ SetBatchRead(batchPolicy, records, batch);
+ }
+ }
+
+ protected internal override bool ParseRow()
+ {
+ BatchRead record = records[batchIndex];
+
+ ParseFieldsRead(record.key);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ }
+ else
+ {
+ record.SetError(resultCode, false);
+ status.SetRowError();
+ }
+ return true;
+ }
+
+ protected internal override BatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new BatchReadListCommand(cluster, batchNode, batchPolicy, records, status);
+ }
+
+ protected internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, status);
+ }
+ }
+
+ //-------------------------------------------------------
+ // GetArray
+ //-------------------------------------------------------
+
+ public sealed class BatchGetArrayCommand : BatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly string[] binNames;
+ private readonly Operation[] ops;
+ private readonly Record[] records;
+ private readonly int readAttr;
+
+ public BatchGetArrayCommand
+ (
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy policy,
+ Key[] keys,
+ string[] binNames,
+ Operation[] ops,
+ Record[] records,
+ int readAttr,
+ bool isOperation,
+ BatchStatus status
+ ) : base(cluster, batch, policy, status, isOperation)
+ {
+ this.keys = keys;
+ this.binNames = binNames;
+ this.ops = ops;
+ this.records = records;
+ this.readAttr = readAttr;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ if (batch.node != null && batch.node.HasBatchAny)
+ {
+ BatchAttr attr = new(policy, readAttr, ops);
+ SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr);
+ }
+ else
+ {
+ SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr);
+ }
+ }
+
+ protected internal override bool ParseRow()
+ {
+ ParseFieldsRead(keys[batchIndex]);
+
+ if (resultCode == 0)
+ {
+ records[batchIndex] = ParseRecord();
+ }
+ return true;
+ }
+
+ protected internal override BatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new BatchGetArrayCommand(cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation, status);
+ }
+
+ protected internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status);
+ }
+ }
+
+ //-------------------------------------------------------
+ // ExistsArray
+ //-------------------------------------------------------
+
+ public sealed class BatchExistsArrayCommand : BatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly bool[] existsArray;
+
+ public BatchExistsArrayCommand
+ (
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy policy,
+ Key[] keys,
+ bool[] existsArray,
+ BatchStatus status
+ ) : base(cluster, batch, policy, status, false)
+ {
+ this.keys = keys;
+ this.existsArray = existsArray;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ if (batch.node != null && batch.node.HasBatchAny)
+ {
+ BatchAttr attr = new(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ SetBatchOperate(batchPolicy, keys, batch, null, null, attr);
+ }
+ else
+ {
+ SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ }
+ }
+
+ protected internal override bool ParseRow()
+ {
+ ParseFieldsRead(keys[batchIndex]);
+ existsArray[batchIndex] = resultCode == 0;
+ return true;
+ }
+
+ protected internal override BatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new BatchExistsArrayCommand(cluster, batchNode, batchPolicy, keys, existsArray, status);
+ }
+
+ protected internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status);
+ }
+ }
+
+ //-------------------------------------------------------
+ // OperateList
+ //-------------------------------------------------------
+
+ public sealed class BatchOperateListCommand : BatchCommand
+ {
+ private readonly IList<BatchRecord> records;
+
+ public BatchOperateListCommand
+ (
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy policy,
+ IList<BatchRecord> records,
+ BatchStatus status
+ ) : base(cluster, batch, policy, status, true)
+ {
+ this.records = records;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ // This method is only called to set inDoubt on node level errors.
+ // SetError() will filter out reads when setting record level inDoubt.
+ return true;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchOperate(batchPolicy, (IList)records, batch);
+ }
+
+ protected internal override bool ParseRow()
+ {
+ BatchRecord record = records[batchIndex];
+
+ ParseFields(record);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ return true;
+ }
+
+ if (resultCode == ResultCode.UDF_BAD_RESPONSE)
+ {
+ Record r = ParseRecord();
+ string m = r.GetString("FAILURE");
+
+ if (m != null)
+ {
+ // Need to store record because failure bin contains an error message.
+ record.record = r;
+ record.resultCode = resultCode;
+ record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter);
+ status.SetRowError();
+ return true;
+ }
+ }
+
+ record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter));
+ status.SetRowError();
+ return true;
+ }
+
+ protected internal override void InDoubt()
+ {
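+ // Rows that never received a response may still have been applied on the
+ // server, so flag unacknowledged writes as in doubt.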
+ foreach (int index in batch.offsets)
+ {
+ BatchRecord record = records[index];
+
+ if (record.resultCode == ResultCode.NO_RESPONSE)
+ {
+ record.inDoubt = record.hasWrite;
+
+ if (record.inDoubt && policy.Txn != null) {
+ policy.Txn.OnWriteInDoubt(record.key);
+ }
+ }
+ }
+ }
+
+ protected internal override BatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new BatchOperateListCommand(cluster, batchNode, batchPolicy, records, status);
+ }
+
+ protected internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, (IList)records, sequenceAP, sequenceSC, batch, status);
+ }
+ }
+
+ //-------------------------------------------------------
+ // OperateArray
+ //-------------------------------------------------------
+
+ public sealed class BatchOperateArrayCommand : BatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly Operation[] ops;
+ private readonly BatchRecord[] records;
+ private readonly BatchAttr attr;
+
+ public BatchOperateArrayCommand
+ (
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ Operation[] ops,
+ BatchRecord[] records,
+ BatchAttr attr,
+ BatchStatus status
+ ) : base(cluster, batch, batchPolicy, status, ops != null)
+ {
+ this.keys = keys;
+ this.ops = ops;
+ this.records = records;
+ this.attr = attr;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ return attr.hasWrite;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchOperate(batchPolicy, keys, batch, null, ops, attr);
+ }
+
+ protected internal override bool ParseRow()
+ {
+ BatchRecord record = records[batchIndex];
+
+ ParseFields(record);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ }
+ else
+ {
+ record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
+ status.SetRowError();
+ }
+ return true;
+ }
+
+ protected internal override void InDoubt()
+ {
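+ // Reads are never in doubt; only unacknowledged writes are flagged.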
+ if (!attr.hasWrite)
+ {
+ return;
+ }
+
+ foreach (int index in batch.offsets)
+ {
+ BatchRecord record = records[index];
+
+ if (record.resultCode == ResultCode.NO_RESPONSE)
+ {
+ record.inDoubt = true;
+
+ if (policy.Txn != null) {
+ policy.Txn.OnWriteInDoubt(record.key);
+ }
+ }
+ }
+ }
+
+ protected internal override BatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status);
+ }
+
+ protected internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status);
+ }
+ }
+
+ //-------------------------------------------------------
+ // UDF
+ //-------------------------------------------------------
+
+ public sealed class BatchUDFCommand : BatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly string packageName;
+ private readonly string functionName;
+ private readonly byte[] argBytes;
+ private readonly BatchRecord[] records;
+ private readonly BatchAttr attr;
+
+ public BatchUDFCommand
+ (
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ string packageName,
+ string functionName,
+ byte[] argBytes,
+ BatchRecord[] records,
+ BatchAttr attr,
+ BatchStatus status
+ ) : base(cluster, batch, batchPolicy, status, false)
+ {
+ this.keys = keys;
+ this.packageName = packageName;
+ this.functionName = functionName;
+ this.argBytes = argBytes;
+ this.records = records;
+ this.attr = attr;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ return attr.hasWrite;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr);
+ }
+
+ protected internal override bool ParseRow()
+ {
+ BatchRecord record = records[batchIndex];
+
+ ParseFields(record);
+
+ if (resultCode == 0)
+ {
+ record.SetRecord(ParseRecord());
+ return true;
+ }
+
+ if (resultCode == ResultCode.UDF_BAD_RESPONSE)
+ {
+ Record r = ParseRecord();
+ string m = r.GetString("FAILURE");
+
+ if (m != null)
+ {
+ // Need to store record because failure bin contains an error message.
+ record.record = r;
+ record.resultCode = resultCode;
+ record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter);
+ status.SetRowError();
+ return true;
+ }
+ }
+
+ record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
+ status.SetRowError();
+ return true;
+ }
+
+ protected internal override void InDoubt()
+ {
+ if (!attr.hasWrite)
+ {
+ return;
+ }
+
+ foreach (int index in batch.offsets)
+ {
+ BatchRecord record = records[index];
+
+ if (record.resultCode == ResultCode.NO_RESPONSE)
+ {
+ record.inDoubt = true;
+
+ if (policy.Txn != null) {
+ policy.Txn.OnWriteInDoubt(record.key);
+ }
+ }
+ }
+ }
+
+ protected internal override BatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status);
+ }
+
+ protected internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status);
+ }
+ }
+
+ //-------------------------------------------------------
+ // MRT
+ //-------------------------------------------------------
+
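+ // These commands verify read versions and roll (commit or abort) writes
+ // for multi-record transactions.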
+ public sealed class BatchTxnVerify : BatchCommand
+ {
+ private readonly Key[] keys;
+ private readonly long?[] versions;
+ private readonly BatchRecord[] records;
+
+ public BatchTxnVerify(
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ long?[] versions,
+ BatchRecord[] records,
+ BatchStatus status
+ ) : base(cluster, batch, batchPolicy, status, false)
+ {
+ this.keys = keys;
+ this.versions = versions;
+ this.records = records;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ return false;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchTxnVerify(batchPolicy, keys, versions, batch);
+ }
+
+ protected internal override bool ParseRow()
+ {
+ SkipKey(fieldCount);
+
+ BatchRecord record = records[batchIndex];
+
+ if (resultCode == ResultCode.OK)
+ {
+ record.resultCode = resultCode;
+ }
+ else
+ {
+ record.SetError(resultCode, false);
+ status.SetRowError();
+ }
+ return true;
+ }
+
+ protected internal override BatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new BatchTxnVerify(cluster, batchNode, batchPolicy, keys, versions, records, status);
+ }
+
+ protected internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, false, status);
+ }
+ }
+
+ public sealed class BatchTxnRoll : BatchCommand
+ {
+ private readonly Txn txn;
+ private readonly Key[] keys;
+ private readonly BatchRecord[] records;
+ private readonly BatchAttr attr;
+
+ public BatchTxnRoll(
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Txn txn,
+ Key[] keys,
+ BatchRecord[] records,
+ BatchAttr attr,
+ BatchStatus status
+ ) : base(cluster, batch, batchPolicy, status, false)
+ {
+ this.txn = txn;
+ this.keys = keys;
+ this.records = records;
+ this.attr = attr;
+ }
+
+ protected internal override bool IsWrite()
+ {
+ return attr.hasWrite;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetBatchTxnRoll(batchPolicy, txn, keys, batch, attr);
+ }
+
+ protected internal override bool ParseRow()
+ {
+ SkipKey(fieldCount);
+
+ BatchRecord record = records[batchIndex];
+
+ if (resultCode == 0)
+ {
+ record.resultCode = resultCode;
+ }
+ else
+ {
+ record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
+ status.SetRowError();
+ }
+ return true;
+ }
+
+ protected internal override void InDoubt()
+ {
+ if (!attr.hasWrite)
+ {
+ return;
+ }
+
+ foreach (int index in batch.offsets)
+ {
+ BatchRecord record = records[index];
+
+ if (record.resultCode == ResultCode.NO_RESPONSE)
+ {
+ record.inDoubt = true;
+ }
+ }
+ }
+
+ protected internal override BatchCommand CreateCommand(BatchNode batchNode)
+ {
+ return new BatchTxnRoll(cluster, batchNode, batchPolicy, txn, keys, records, attr, status);
+ }
+
+ protected internal override List<BatchNode> GenerateBatchNodes()
+ {
+ return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status);
+ }
+ }
+
+ //-------------------------------------------------------
+ // Batch Base Command
+ //-------------------------------------------------------
+
+ public abstract class BatchCommand : MultiCommand
+ {
+ internal readonly BatchNode batch;
+ internal readonly BatchPolicy batchPolicy;
+ internal readonly BatchStatus status;
+ internal BatchExecutor parent;
+ internal uint sequenceAP;
+ internal uint sequenceSC;
+ internal bool splitRetry;
+
+ public BatchCommand
+ (
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ BatchStatus status,
+ bool isOperation
+ ) : base(cluster, batchPolicy, batch.node, isOperation)
+ {
+ this.batch = batch;
+ this.batchPolicy = batchPolicy;
+ this.status = status;
+ }
+
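+ // Entry point invoked by BatchExecutor via ThreadPool.UnsafeQueueUserWorkItem.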
+ public void Run(object obj)
+ {
+ try
+ {
+ Execute();
+ }
+ catch (AerospikeException ae)
+ {
+ if (ae.InDoubt)
+ {
+ SetInDoubt();
+ }
+ status.SetException(ae);
+ }
+ catch (Exception e)
+ {
+ SetInDoubt();
+ status.SetException(e);
+ }
+ finally
+ {
+ parent.OnComplete();
+ }
+ }
+
+ protected void ParseFieldsRead(Key key)
+ {
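+ // Under a transaction, capture the record version returned for this key;
+ // otherwise the key fields can simply be skipped.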
+ if (policy.Txn != null)
+ {
+ long? version = ParseVersion(fieldCount);
+ policy.Txn.OnRead(key, version);
+ }
+ else
+ {
+ SkipKey(fieldCount);
+ }
+ }
+
+ protected void ParseFields(BatchRecord br)
+ {
+ if (policy.Txn != null)
+ {
+ long? version = ParseVersion(fieldCount);
+
+ if (br.hasWrite)
+ {
+ policy.Txn.OnWrite(br.key, version, resultCode);
+ }
+ else
+ {
+ policy.Txn.OnRead(br.key, version);
+ }
+ }
+ else
+ {
+ SkipKey(fieldCount);
+ }
+ }
+
+ protected override Latency.LatencyType GetLatencyType()
+ {
+ return Latency.LatencyType.BATCH;
+ }
+
+ protected internal override bool PrepareRetry(bool timeout)
+ {
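+ // Advance to the next replica only when the replica policy allows
+ // switching nodes and the parent executor (if any) is still running.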
+ if (!((batchPolicy.replica == Replica.SEQUENCE || batchPolicy.replica == Replica.PREFER_RACK) &&
+ (parent == null || !parent.IsDone())))
+ {
+ // Perform regular retry to same node.
+ return true;
+ }
+ sequenceAP++;
+
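+ // Linearized SC reads must stay on the same node after a timeout,
+ // so the SC sequence is only advanced in the other cases.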
+ if (!timeout || batchPolicy.readModeSC != ReadModeSC.LINEARIZE)
+ {
+ sequenceSC++;
+ }
+ return false;
+ }
+
+ protected internal override bool RetryBatch
+ (
+ Cluster cluster,
+ int socketTimeout,
+ int totalTimeout,
+ DateTime deadline,
+ int iteration,
+ int commandSentCounter
+ )
+ {
+ // Retry requires keys for this node to be split among other nodes.
+ // This is both recursive and exponential.
+ List<BatchNode> batchNodes = GenerateBatchNodes();
+
+ if (batchNodes.Count == 1 && batchNodes[0].node == batch.node)
+ {
+ // Batch node is the same. Go through normal retry.
+ return false;
+ }
+
+ splitRetry = true;
+
+ // Run batch requests sequentially in same thread.
+ foreach (BatchNode batchNode in batchNodes)
+ {
+ BatchCommand command = CreateCommand(batchNode);
+ command.parent = parent;
+ command.sequenceAP = sequenceAP;
+ command.sequenceSC = sequenceSC;
+ command.socketTimeout = socketTimeout;
+ command.totalTimeout = totalTimeout;
+ command.iteration = iteration;
+ command.commandSentCounter = commandSentCounter;
+ command.deadline = deadline;
+
+ try
+ {
+ cluster.AddRetry();
+ command.ExecuteCommand();
+ }
+ catch (AerospikeException ae)
+ {
+ if (ae.InDoubt)
+ {
+ SetInDoubt();
+ }
+ status.SetException(ae);
+
+ if (!batchPolicy.respondAllKeys)
+ {
+ throw;
+ }
+ }
+ catch (Exception e)
+ {
+ if (!command.splitRetry)
+ {
+ SetInDoubt();
+ }
+ status.SetException(e);
+
+ if (!batchPolicy.respondAllKeys)
+ {
+ throw;
+ }
+ }
+ }
+ return true;
+ }
+
+ protected internal void SetInDoubt()
+ {
+ // Set error/inDoubt for keys associated with this batch command when
+ // the command was not retried and split. If a split retry occurred,
+ // those new subcommands have already set inDoubt on the affected
+ // subset of keys.
+ if (!splitRetry)
+ {
+ InDoubt();
+ }
+ }
+
+ protected internal virtual void InDoubt()
+ {
+ // Do nothing by default. Batch writes will override this method.
+ }
+
+ protected internal abstract BatchCommand CreateCommand(BatchNode batchNode);
+ protected internal abstract List<BatchNode> GenerateBatchNodes();
+ }
+}
diff --git a/AerospikeClient/Command/BatchAttr.cs b/AerospikeClient/Command/BatchAttr.cs
index 65c13b13..b58f03bb 100644
--- a/AerospikeClient/Command/BatchAttr.cs
+++ b/AerospikeClient/Command/BatchAttr.cs
@@ -22,7 +22,9 @@ public sealed class BatchAttr
public int readAttr;
public int writeAttr;
public int infoAttr;
+ public int txnAttr;
public int expiration;
+ public int opSize;
public short generation;
public bool hasWrite;
public bool sendKey;
@@ -394,5 +396,30 @@ public void SetDelete(BatchDeletePolicy dp)
infoAttr |= Command.INFO3_COMMIT_MASTER;
}
}
+
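+ // Estimate the wire size of the given operations: one operation header
+ // plus the UTF-8 bin name plus the encoded value size per operation.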
+ public void SetOpSize(Operation[] ops)
+ {
+ int dataOffset = 0;
+
+ foreach (Operation op in ops)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(op.binName) + Command.OPERATION_HEADER_SIZE;
+ dataOffset += op.value.EstimateSize();
+ }
+ opSize = dataOffset;
+ }
+
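+ // Configure attributes for a transaction verify/roll command: a durable
+ // write with no filter expression, generation check or expiration.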
+ public void SetTxn(int attr)
+ {
+ filterExp = null;
+ readAttr = 0;
+ writeAttr = Command.INFO2_WRITE | Command.INFO2_RESPOND_ALL_OPS | Command.INFO2_DURABLE_DELETE;
+ infoAttr = 0;
+ txnAttr = attr;
+ expiration = 0;
+ generation = 0;
+ hasWrite = true;
+ sendKey = false;
+ }
}
}
diff --git a/AerospikeClient/Command/BatchExecutor.cs b/AerospikeClient/Command/BatchExecutor.cs
index 7fc82ac4..cd508292 100644
--- a/AerospikeClient/Command/BatchExecutor.cs
+++ b/AerospikeClient/Command/BatchExecutor.cs
@@ -1,167 +1,170 @@
-/*
- * Copyright 2012-2024 Aerospike, Inc.
- *
- * Portions may be licensed to Aerospike, Inc. under one or more contributor
- * license agreements.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-using System;
-using System.Threading;
-
-namespace Aerospike.Client
-{
- public sealed class BatchExecutor
- {
- public static void Execute(Cluster cluster, BatchPolicy policy, BatchCommand[] commands, BatchStatus status)
- {
- cluster.AddTran();
-
- if (policy.maxConcurrentThreads == 1 || commands.Length <= 1)
- {
- // Run batch requests sequentially in same thread.
- foreach (BatchCommand command in commands)
- {
- try
- {
- command.Execute();
- }
- catch (AerospikeException ae)
- {
- // Set error/inDoubt for keys associated with this batch command when
- // the command was not retried and split. If a split retry occurred,
- // those new subcommands have already set error/inDoubt on the affected
- // subset of keys.
- if (!command.splitRetry)
- {
- command.SetInDoubt(ae.InDoubt);
- }
- status.SetException(ae);
-
- if (!policy.respondAllKeys)
- {
- throw;
- }
- }
- catch (Exception e)
- {
- if (!command.splitRetry)
- {
- command.SetInDoubt(true);
- }
- status.SetException(e);
-
- if (!policy.respondAllKeys)
- {
- throw;
- }
- }
- }
- status.CheckException();
- return;
- }
-
- // Run batch requests in parallel in separate threads.
- BatchExecutor executor = new BatchExecutor(policy, commands, status);
- executor.Execute();
- }
-
- public static void Execute(BatchCommand command, BatchStatus status)
- {
- command.Execute();
- status.CheckException();
- }
-
- private readonly BatchStatus status;
- private readonly int maxConcurrentThreads;
- private readonly BatchCommand[] commands;
- private int completedCount;
- private volatile int done;
- private bool completed;
-
- private BatchExecutor(BatchPolicy policy, BatchCommand[] commands, BatchStatus status)
- {
- this.commands = commands;
- this.status = status;
- this.maxConcurrentThreads = (policy.maxConcurrentThreads == 0 || policy.maxConcurrentThreads >= commands.Length) ? commands.Length : policy.maxConcurrentThreads;
- }
-
- internal void Execute()
- {
- // Start threads.
- for (int i = 0; i < maxConcurrentThreads; i++)
- {
- BatchCommand cmd = commands[i];
- cmd.parent = this;
- ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null);
- }
-
- // Multiple threads write to the batch record array/list, so one might think that memory barriers
- // are needed. That should not be necessary because of this synchronized waitTillComplete().
- WaitTillComplete();
-
- // Throw an exception if an error occurred.
- status.CheckException();
- }
-
- internal void OnComplete()
- {
- int finished = Interlocked.Increment(ref completedCount);
-
- if (finished < commands.Length)
- {
- int nextThread = finished + maxConcurrentThreads - 1;
-
- // Determine if a new thread needs to be started.
- if (nextThread < commands.Length && done == 0)
- {
- // Start new thread.
- BatchCommand cmd = commands[nextThread];
- cmd.parent = this;
- ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null);
- }
- }
- else
- {
- // Ensure executor succeeds or fails exactly once.
- if (Interlocked.Exchange(ref done, 1) == 0)
- {
- NotifyCompleted();
- }
- }
- }
-
- internal bool IsDone()
- {
- return done != 0;
- }
-
- private void WaitTillComplete()
- {
- lock (this)
- {
- while (!completed)
- {
- Monitor.Wait(this);
- }
- }
- }
-
- private void NotifyCompleted()
- {
- lock (this)
- {
- completed = true;
- Monitor.Pulse(this);
- }
- }
- }
-}
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+using System;
+using System.Threading;
+
+namespace Aerospike.Client
+{
+ public sealed class BatchExecutor
+ {
+ public static void Execute(Cluster cluster, BatchPolicy policy, BatchCommand[] commands, BatchStatus status)
+ {
+ cluster.AddCommandCount();
+
+ if (policy.maxConcurrentThreads == 1 || commands.Length <= 1)
+ {
+ // Run batch requests sequentially in same thread.
+ foreach (BatchCommand command in commands)
+ {
+ try
+ {
+ command.Execute();
+ }
+ catch (AerospikeException ae)
+ {
+ // Set error/inDoubt for keys associated with this batch command when
+ // the command was not retried and split. If a split retry occurred,
+ // those new subcommands have already set error/inDoubt on the affected
+ // subset of keys.
+ if (!command.splitRetry)
+ {
+ if (ae.InDoubt)
+ {
+ command.SetInDoubt();
+ }
+ }
+ status.SetException(ae);
+
+ if (!policy.respondAllKeys)
+ {
+ throw;
+ }
+ }
+ catch (Exception e)
+ {
+ if (!command.splitRetry)
+ {
+ command.SetInDoubt();
+ }
+ status.SetException(e);
+
+ if (!policy.respondAllKeys)
+ {
+ throw;
+ }
+ }
+ }
+ status.CheckException();
+ return;
+ }
+
+ // Run batch requests in parallel in separate threads.
+ BatchExecutor executor = new BatchExecutor(policy, commands, status);
+ executor.Execute();
+ }
+
+ public static void Execute(BatchCommand command, BatchStatus status)
+ {
+ command.Execute();
+ status.CheckException();
+ }
+
+ private readonly BatchStatus status;
+ private readonly int maxConcurrentThreads;
+ private readonly BatchCommand[] commands;
+ private int completedCount;
+ private volatile int done;
+ private bool completed;
+
+ private BatchExecutor(BatchPolicy policy, BatchCommand[] commands, BatchStatus status)
+ {
+ this.commands = commands;
+ this.status = status;
+ this.maxConcurrentThreads = (policy.maxConcurrentThreads == 0 || policy.maxConcurrentThreads >= commands.Length) ? commands.Length : policy.maxConcurrentThreads;
+ }
+
+ internal void Execute()
+ {
+ // Start threads.
+ for (int i = 0; i < maxConcurrentThreads; i++)
+ {
+ BatchCommand cmd = commands[i];
+ cmd.parent = this;
+ ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null);
+ }
+
+ // Multiple threads write to the batch record array/list, so one might think that memory barriers
+ // are needed. That should not be necessary because of this synchronized WaitTillComplete().
+ WaitTillComplete();
+
+ // Throw an exception if an error occurred.
+ status.CheckException();
+ }
+
+ internal void OnComplete()
+ {
+ int finished = Interlocked.Increment(ref completedCount);
+
+ if (finished < commands.Length)
+ {
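+ // Completion of one command frees a slot; compute the index of the next
+ // queued command so at most maxConcurrentThreads run at once.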
+ int nextThread = finished + maxConcurrentThreads - 1;
+
+ // Determine if a new thread needs to be started.
+ if (nextThread < commands.Length && done == 0)
+ {
+ // Start new thread.
+ BatchCommand cmd = commands[nextThread];
+ cmd.parent = this;
+ ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null);
+ }
+ }
+ else
+ {
+ // Ensure executor succeeds or fails exactly once.
+ if (Interlocked.Exchange(ref done, 1) == 0)
+ {
+ NotifyCompleted();
+ }
+ }
+ }
+
+ internal bool IsDone()
+ {
+ return done != 0;
+ }
+
+ private void WaitTillComplete()
+ {
+ lock (this)
+ {
+ while (!completed)
+ {
+ Monitor.Wait(this);
+ }
+ }
+ }
+
+ private void NotifyCompleted()
+ {
+ lock (this)
+ {
+ completed = true;
+ Monitor.Pulse(this);
+ }
+ }
+ }
+}
diff --git a/AerospikeClient/Command/ByteUtil.cs b/AerospikeClient/Command/ByteUtil.cs
index fbb2a2e5..456fcd1d 100644
--- a/AerospikeClient/Command/ByteUtil.cs
+++ b/AerospikeClient/Command/ByteUtil.cs
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements.
@@ -41,7 +41,7 @@ public static Value BytesToKeyValue(ParticleType type, byte[] buf, int offset, i
case ParticleType.BLOB:
byte[] dest = new byte[len];
Array.Copy(buf, offset, dest, 0, len);
- return Value.Get(dest);
+ return Value.Get(dest);
default:
return null;
@@ -424,15 +424,52 @@ public static long LittleBytesToLong(byte[] buf, int offset)
);
}
- //-------------------------------------------------------
- // 32 bit number conversions.
- //-------------------------------------------------------
-
- /// <summary>
- /// Convert int to big endian 32 bits.
- /// The bit pattern will be the same regardless of sign.
+ //-------------------------------------------------------
+ // Transaction version conversions.
+ //-------------------------------------------------------
+
+ /// <summary>
+ /// Convert long to a 7 byte record version for MRT.
+ /// </summary>
+ public static void LongToVersionBytes(long v, byte[] buf, int offset)
+ {
+ buf[offset++] = (byte)(v >> 0);
+ buf[offset++] = (byte)(v >> 8);
+ buf[offset++] = (byte)(v >> 16);
+ buf[offset++] = (byte)(v >> 24);
+ buf[offset++] = (byte)(v >> 32);
+ buf[offset++] = (byte)(v >> 40);
+ buf[offset] = (byte)(v >> 48);
+ }
+
+ /// <summary>
+ /// Convert 7 byte record version to a long for MRT.
/// </summary>
- public static int IntToBytes(uint v, byte[] buf, int offset)
+ /// <param name="buf"></param>
+ /// <param name="offset"></param>
+ /// <returns></returns>
+ public static long VersionBytesToLong(byte[] buf, int offset)
+ {
+ return (
+ ((long)(buf[offset] & 0xFF) << 0) |
+ ((long)(buf[offset + 1] & 0xFF) << 8) |
+ ((long)(buf[offset + 2] & 0xFF) << 16) |
+ ((long)(buf[offset + 3] & 0xFF) << 24) |
+ ((long)(buf[offset + 4] & 0xFF) << 32) |
+ ((long)(buf[offset + 5] & 0xFF) << 40) |
+ ((long)(buf[offset + 6] & 0xFF) << 48)
+ );
+ }
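+
+ // A minimal round-trip sketch (illustrative only, assuming the version
+ // fits in the low seven bytes of the long):
+ // byte[] buf = new byte[7];
+ // LongToVersionBytes(0x0023456789ABCD, buf, 0);
+ // long v = VersionBytesToLong(buf, 0); // v == 0x0023456789ABCD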
+
+ //-------------------------------------------------------
+ // 32 bit number conversions.
+ //-------------------------------------------------------
+
+ /// <summary>
+ /// Convert int to big endian 32 bits.
+ /// The bit pattern will be the same regardless of sign.
+ /// </summary>
+ public static int IntToBytes(uint v, byte[] buf, int offset)
{
// Benchmarks show that custom conversion is faster than System.BitConverter.GetBytes().
// Assume little endian machine and reverse/convert in one pass.
diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs
index 0f0fe138..7c38a9e8 100644
--- a/AerospikeClient/Command/Command.cs
+++ b/AerospikeClient/Command/Command.cs
@@ -1,2362 +1,3311 @@
-/*
- * Copyright 2012-2024 Aerospike, Inc.
- *
- * Portions may be licensed to Aerospike, Inc. under one or more contributor
- * license agreements.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-using System.Collections;
-
-#pragma warning disable 0618
-
-namespace Aerospike.Client
-{
- public abstract class Command
- {
- public static readonly int INFO1_READ = (1 << 0); // Contains a read operation.
- public static readonly int INFO1_GET_ALL = (1 << 1); // Get all bins.
- public static readonly int INFO1_SHORT_QUERY = (1 << 2); // Short query.
- public static readonly int INFO1_BATCH = (1 << 3); // Batch read or exists.
- public static readonly int INFO1_NOBINDATA = (1 << 5); // Do not read the bins.
- public static readonly int INFO1_READ_MODE_AP_ALL = (1 << 6); // Involve all replicas in read operation.
- public static readonly int INFO1_COMPRESS_RESPONSE = (1 << 7); // Tell server to compress its response.
-
- public static readonly int INFO2_WRITE = (1 << 0); // Create or update record
- public static readonly int INFO2_DELETE = (1 << 1); // Fling a record into the belly of Moloch.
- public static readonly int INFO2_GENERATION = (1 << 2); // Update if expected generation == old.
- public static readonly int INFO2_GENERATION_GT = (1 << 3); // Update if new generation >= old, good for restore.
- public static readonly int INFO2_DURABLE_DELETE = (1 << 4); // Transaction resulting in record deletion leaves tombstone (Enterprise only).
- public static readonly int INFO2_CREATE_ONLY = (1 << 5); // Create only. Fail if record already exists.
- public static readonly int INFO2_RELAX_AP_LONG_QUERY = (1 << 6); // Treat as long query, but relax read consistency
- public static readonly int INFO2_RESPOND_ALL_OPS = (1 << 7); // Return a result for every operation.
-
- public static readonly int INFO3_LAST = (1 << 0); // This is the last of a multi-part message.
- public static readonly int INFO3_COMMIT_MASTER = (1 << 1); // Commit to master only before declaring success.
- // On send: Do not return partition done in scan/query.
- // On receive: Specified partition is done in scan/query.
- public static readonly int INFO3_PARTITION_DONE = (1 << 2);
- public static readonly int INFO3_UPDATE_ONLY = (1 << 3); // Update only. Merge bins.
- public static readonly int INFO3_CREATE_OR_REPLACE = (1 << 4); // Create or completely replace record.
- public static readonly int INFO3_REPLACE_ONLY = (1 << 5); // Completely replace existing record only.
- public static readonly int INFO3_SC_READ_TYPE = (1 << 6); // See below.
- public static readonly int INFO3_SC_READ_RELAX = (1 << 7); // See below.
-
- // Interpret SC_READ bits in info3.
- //
- // RELAX TYPE
- // strict
- // ------
- // 0 0 sequential (default)
- // 0 1 linearize
- //
- // relaxed
- // -------
- // 1 0 allow replica
- // 1 1 allow unavailable
-
- public const byte STATE_READ_AUTH_HEADER = 1;
- public const byte STATE_READ_HEADER = 2;
- public const byte STATE_READ_DETAIL = 3;
- public const byte STATE_COMPLETE = 4;
-
- public const byte BATCH_MSG_READ = 0x0;
- public const byte BATCH_MSG_REPEAT = 0x1;
- public const byte BATCH_MSG_INFO = 0x2;
- public const byte BATCH_MSG_GEN = 0x4;
- public const byte BATCH_MSG_TTL = 0x8;
-
- public const int MSG_TOTAL_HEADER_SIZE = 30;
- public const int FIELD_HEADER_SIZE = 5;
- public const int OPERATION_HEADER_SIZE = 8;
- public const int MSG_REMAINING_HEADER_SIZE = 22;
- public const int DIGEST_SIZE = 20;
- public const int COMPRESS_THRESHOLD = 128;
- public const ulong CL_MSG_VERSION = 2UL;
- public const ulong AS_MSG_TYPE = 3UL;
- public const ulong MSG_TYPE_COMPRESSED = 4UL;
-
- internal byte[] dataBuffer;
- internal int dataOffset;
- internal readonly int maxRetries;
- internal readonly int serverTimeout;
- internal int socketTimeout;
- internal int totalTimeout;
-
- public Command(int socketTimeout, int totalTimeout, int maxRetries)
- {
- this.maxRetries = maxRetries;
- this.totalTimeout = totalTimeout;
-
- if (totalTimeout > 0)
- {
- this.socketTimeout = (socketTimeout < totalTimeout && socketTimeout > 0) ? socketTimeout : totalTimeout;
- this.serverTimeout = this.socketTimeout;
- }
- else
- {
- this.socketTimeout = socketTimeout;
- this.serverTimeout = 0;
- }
- }
-
- //--------------------------------------------------
- // Writes
- //--------------------------------------------------
-
- public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key key, Bin[] bins)
- {
- Begin();
- int fieldCount = EstimateKeySize(policy, key);
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
-
- foreach (Bin bin in bins)
- {
- EstimateOperationSize(bin);
- }
-
- bool compress = SizeBuffer(policy);
-
- WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.Length);
- WriteKey(policy, key);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
-
- foreach (Bin bin in bins)
- {
- WriteOperation(bin, operation);
- }
- End(compress);
- }
-
- public virtual void SetDelete(WritePolicy policy, Key key)
- {
- Begin();
- int fieldCount = EstimateKeySize(policy, key);
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
- SizeBuffer();
- WriteHeaderWrite(policy, Command.INFO2_WRITE | Command.INFO2_DELETE, fieldCount, 0);
- WriteKey(policy, key);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
- End();
- }
-
- public virtual void SetTouch(WritePolicy policy, Key key)
- {
- Begin();
- int fieldCount = EstimateKeySize(policy, key);
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
- EstimateOperationSize();
- SizeBuffer();
- WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 1);
- WriteKey(policy, key);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
- WriteOperation(Operation.Type.TOUCH);
- End();
- }
-
- //--------------------------------------------------
- // Reads
- //--------------------------------------------------
-
- public virtual void SetExists(Policy policy, Key key)
- {
- Begin();
- int fieldCount = EstimateKeySize(policy, key);
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
- SizeBuffer();
- WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0);
- WriteKey(policy, key);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
- End();
- }
-
- public virtual void SetRead(Policy policy, Key key)
- {
- Begin();
- int fieldCount = EstimateKeySize(policy, key);
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
- SizeBuffer();
- WriteHeaderRead(policy, serverTimeout, Command.INFO1_READ | Command.INFO1_GET_ALL, 0, 0, fieldCount, 0);
- WriteKey(policy, key);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
- End();
- }
-
- public virtual void SetRead(Policy policy, Key key, string[] binNames)
- {
- if (binNames != null)
- {
- Begin();
- int fieldCount = EstimateKeySize(policy, key);
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
-
- foreach (string binName in binNames)
- {
- EstimateOperationSize(binName);
- }
- SizeBuffer();
- WriteHeaderRead(policy, serverTimeout, Command.INFO1_READ, 0, 0, fieldCount, binNames.Length);
- WriteKey(policy, key);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
-
- foreach (string binName in binNames)
- {
- WriteOperation(binName, Operation.Type.READ);
- }
- End();
- }
- else
- {
- SetRead(policy, key);
- }
- }
-
- public virtual void SetReadHeader(Policy policy, Key key)
- {
- Begin();
- int fieldCount = EstimateKeySize(policy, key);
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
- EstimateOperationSize((string)null);
- SizeBuffer();
- WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0);
- WriteKey(policy, key);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
- End();
- }
-
- //--------------------------------------------------
- // Operate
- //--------------------------------------------------
-
- public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args)
- {
- Begin();
- int fieldCount = EstimateKeySize(policy, key);
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
- dataOffset += args.size;
-
- bool compress = SizeBuffer(policy);
-
- WriteHeaderReadWrite(policy, args, fieldCount);
- WriteKey(policy, key);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
-
- foreach (Operation operation in args.operations)
- {
- WriteOperation(operation);
- }
- End(compress);
- }
-
- //--------------------------------------------------
- // UDF
- //--------------------------------------------------
-
- public virtual void SetUdf(WritePolicy policy, Key key, string packageName, string functionName, Value[] args)
- {
- Begin();
- int fieldCount = EstimateKeySize(policy, key);
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
- byte[] argBytes = Packer.Pack(args);
- fieldCount += EstimateUdfSize(packageName, functionName, argBytes);
-
- bool compress = SizeBuffer(policy);
-
- WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0);
- WriteKey(policy, key);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
- WriteField(packageName, FieldType.UDF_PACKAGE_NAME);
- WriteField(functionName, FieldType.UDF_FUNCTION);
- WriteField(argBytes, FieldType.UDF_ARGLIST);
- End(compress);
- }
-
- //--------------------------------------------------
- // Batch Read Only
- //--------------------------------------------------
-
- public virtual void SetBatchRead(BatchPolicy policy, List<BatchRead> records, BatchNode batch)
- {
- // Estimate full row size
- int[] offsets = batch.offsets;
- int max = batch.offsetsSize;
- BatchRead prev = null;
-
- Begin();
- int fieldCount = 1;
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
-
- dataOffset += FIELD_HEADER_SIZE + 5;
-
- for (int i = 0; i < max; i++)
- {
- BatchRead record = records[offsets[i]];
- Key key = record.key;
- string[] binNames = record.binNames;
- Operation[] ops = record.ops;
-
- dataOffset += key.digest.Length + 4;
-
- // Avoid relatively expensive full equality checks for performance reasons.
- // Use reference equality only in hope that common namespaces/bin names are set from
- // fixed variables. It's fine if equality not determined correctly because it just
- // results in more space used. The batch will still be correct.
- if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName &&
- prev.binNames == binNames && prev.readAllBins == record.readAllBins &&
- prev.ops == ops)
- {
- // Can set repeat previous namespace/bin names to save space.
- dataOffset++;
- }
- else
- {
- // Estimate full header, namespace and bin names.
- dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;
- dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
-
- if (binNames != null)
- {
- foreach (string binName in binNames)
- {
- EstimateOperationSize(binName);
- }
- }
- else if (ops != null)
- {
- foreach (Operation op in ops)
- {
- EstimateReadOperationSize(op);
- }
- }
- prev = record;
- }
- }
-
- bool compress = SizeBuffer(policy);
-
- int readAttr = Command.INFO1_READ;
-
- if (policy.readModeAP == ReadModeAP.ALL)
- {
- readAttr |= Command.INFO1_READ_MODE_AP_ALL;
- }
-
- WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
-
- int fieldSizeOffset = dataOffset;
- WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
-
- ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
- dataOffset += 4;
- dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
- prev = null;
-
- for (int i = 0; i < max; i++)
- {
- int index = offsets[i];
- ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
- dataOffset += 4;
-
- BatchRead record = records[index];
- Key key = record.key;
- string[] binNames = record.binNames;
- Operation[] ops = record.ops;
- byte[] digest = key.digest;
- Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
- dataOffset += digest.Length;
-
- // Avoid relatively expensive full equality checks for performance reasons.
- // Use reference equality only in hope that common namespaces/bin names are set from
- // fixed variables. It's fine if equality not determined correctly because it just
- // results in more space used. The batch will still be correct.
- if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName &&
- prev.binNames == binNames && prev.readAllBins == record.readAllBins &&
- prev.ops == ops)
- {
- // Can set repeat previous namespace/bin names to save space.
- dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
- }
- else
- {
- // Write full header, namespace and bin names.
- dataBuffer[dataOffset++] = BATCH_MSG_READ;
-
- if (binNames != null && binNames.Length != 0)
- {
- dataBuffer[dataOffset++] = (byte)readAttr;
- WriteBatchFields(key, 0, binNames.Length);
-
- foreach (string binName in binNames)
- {
- WriteOperation(binName, Operation.Type.READ);
- }
- }
- else if (ops != null)
- {
- int offset = dataOffset++;
- WriteBatchFields(key, 0, ops.Length);
- dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr);
- }
- else
- {
- dataBuffer[dataOffset++] = (byte)(readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA));
- WriteBatchFields(key, 0, 0);
- }
- prev = record;
- }
- }
-
- // Write real field size.
- ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
- End(compress);
- }
-
- public virtual void SetBatchRead
- (
- BatchPolicy policy,
- Key[] keys,
- BatchNode batch,
- string[] binNames,
- Operation[] ops,
- int readAttr
- )
- {
- // Estimate full row size
- int[] offsets = batch.offsets;
- int max = batch.offsetsSize;
-
- // Estimate dataBuffer size.
- Begin();
- int fieldCount = 1;
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
- dataOffset += FIELD_HEADER_SIZE + 5;
-
- Key prev = null;
-
- for (int i = 0; i < max; i++)
- {
- Key key = keys[offsets[i]];
-
- dataOffset += key.digest.Length + 4;
-
- // Try reference equality in hope that namespace for all keys is set from a fixed variable.
- if (prev != null && prev.ns == key.ns && prev.setName == key.setName)
- {
- // Can set repeat previous namespace/bin names to save space.
- dataOffset++;
- }
- else
- {
- // Estimate full header, namespace and bin names.
- dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;
- dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
-
- if (binNames != null)
- {
- foreach (String binName in binNames)
- {
- EstimateOperationSize(binName);
- }
- }
- else if (ops != null)
- {
- foreach (Operation op in ops)
- {
- EstimateReadOperationSize(op);
- }
- }
- prev = key;
- }
- }
-
- bool compress = SizeBuffer(policy);
-
- if (policy.readModeAP == ReadModeAP.ALL)
- {
- readAttr |= Command.INFO1_READ_MODE_AP_ALL;
- }
-
- WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
-
- int fieldSizeOffset = dataOffset;
- WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
-
- ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
- dataOffset += 4;
- dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
- prev = null;
-
- for (int i = 0; i < max; i++)
- {
- int index = offsets[i];
- ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
- dataOffset += 4;
-
- Key key = keys[index];
- byte[] digest = key.digest;
- Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
- dataOffset += digest.Length;
-
- // Try reference equality in hope that namespace for all keys is set from a fixed variable.
- if (prev != null && prev.ns == key.ns && prev.setName == key.setName)
- {
- // Can set repeat previous namespace/bin names to save space.
- dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
- }
- else
- {
- // Write full header, namespace and bin names.
- dataBuffer[dataOffset++] = BATCH_MSG_READ;
-
- if (binNames != null && binNames.Length != 0)
- {
- dataBuffer[dataOffset++] = (byte)readAttr;
- WriteBatchFields(key, 0, binNames.Length);
-
- foreach (String binName in binNames)
- {
- WriteOperation(binName, Operation.Type.READ);
- }
- }
- else if (ops != null)
- {
- int offset = dataOffset++;
- WriteBatchFields(key, 0, ops.Length);
- dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr);
- }
- else
- {
- dataBuffer[dataOffset++] = (byte)readAttr;
- WriteBatchFields(key, 0, 0);
- }
- prev = key;
- }
- }
-
- // Write real field size.
- ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
- End(compress);
- }
-
- //--------------------------------------------------
- // Batch Read/Write Operations
- //--------------------------------------------------
-
- public virtual void SetBatchOperate(BatchPolicy policy, IList records, BatchNode batch)
- {
- // Estimate full row size
- int[] offsets = batch.offsets;
- int max = batch.offsetsSize;
- BatchRecord prev = null;
-
- Begin();
- int fieldCount = 1;
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
-
- dataOffset += FIELD_HEADER_SIZE + 5;
-
- for (int i = 0; i < max; i++)
- {
- BatchRecord record = (BatchRecord)records[offsets[i]];
- Key key = record.key;
-
- dataOffset += key.digest.Length + 4;
-
- // Avoid relatively expensive full equality checks for performance reasons.
- // Use reference equality only in hope that common namespaces/bin names are set from
- // fixed variables. It's fine if equality not determined correctly because it just
- // results in more space used. The batch will still be correct.
- if (!policy.sendKey && prev != null && prev.key.ns == key.ns &&
- prev.key.setName == key.setName && record.Equals(prev))
- {
- // Can set repeat previous namespace/bin names to save space.
- dataOffset++;
- }
- else
- {
- // Estimate full header, namespace and bin names.
- dataOffset += 12;
- dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
- dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
- dataOffset += record.Size(policy);
- prev = record;
- }
- }
-
- bool compress = SizeBuffer(policy);
-
- WriteBatchHeader(policy, totalTimeout, fieldCount);
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
-
- int fieldSizeOffset = dataOffset;
- WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
-
- ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
- dataOffset += 4;
- dataBuffer[dataOffset++] = GetBatchFlags(policy);
-
- BatchAttr attr = new BatchAttr();
- prev = null;
-
- for (int i = 0; i < max; i++)
- {
- int index = offsets[i];
- ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
- dataOffset += 4;
-
- BatchRecord record = (BatchRecord)records[index];
- Key key = record.key;
- byte[] digest = key.digest;
- Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
- dataOffset += digest.Length;
-
- // Avoid relatively expensive full equality checks for performance reasons.
- // Use reference equality only in hope that common namespaces/bin names are set from
- // fixed variables. It's fine if equality not determined correctly because it just
- // results in more space used. The batch will still be correct.
- if (!policy.sendKey && prev != null && prev.key.ns == key.ns &&
- prev.key.setName == key.setName && record.Equals(prev))
- {
- // Can set repeat previous namespace/bin names to save space.
- dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
- }
- else
- {
- // Write full message.
- switch (record.GetBatchType())
- {
- case BatchRecord.Type.BATCH_READ:
- {
- BatchRead br = (BatchRead)record;
-
- if (br.policy != null)
- {
- attr.SetRead(br.policy);
- }
- else
- {
- attr.SetRead(policy);
- }
-
- if (br.binNames != null)
- {
- WriteBatchBinNames(key, br.binNames, attr, attr.filterExp);
- }
- else if (br.ops != null)
- {
- attr.AdjustRead(br.ops);
- WriteBatchOperations(key, br.ops, attr, attr.filterExp);
- }
- else
- {
- attr.AdjustRead(br.readAllBins);
- WriteBatchRead(key, attr, attr.filterExp, 0);
- }
- break;
- }
-
- case BatchRecord.Type.BATCH_WRITE:
- {
- BatchWrite bw = (BatchWrite)record;
-
- if (bw.policy != null)
- {
- attr.SetWrite(bw.policy);
- }
- else
- {
- attr.SetWrite(policy);
- }
- attr.AdjustWrite(bw.ops);
- WriteBatchOperations(key, bw.ops, attr, attr.filterExp);
- break;
- }
-
- case BatchRecord.Type.BATCH_UDF:
- {
- BatchUDF bu = (BatchUDF)record;
-
- if (bu.policy != null)
- {
- attr.SetUDF(bu.policy);
- }
- else
- {
- attr.SetUDF(policy);
- }
- WriteBatchWrite(key, attr, attr.filterExp, 3, 0);
- WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME);
- WriteField(bu.functionName, FieldType.UDF_FUNCTION);
- WriteField(bu.argBytes, FieldType.UDF_ARGLIST);
- break;
- }
-
- case BatchRecord.Type.BATCH_DELETE:
- {
- BatchDelete bd = (BatchDelete)record;
-
- if (bd.policy != null)
- {
- attr.SetDelete(bd.policy);
- }
- else
- {
- attr.SetDelete(policy);
- }
- WriteBatchWrite(key, attr, attr.filterExp, 0, 0);
- break;
- }
- }
- prev = record;
- }
- }
-
- // Write real field size.
- ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
- End(compress);
- }
-
- public virtual void SetBatchOperate
- (
- BatchPolicy policy,
- Key[] keys,
- BatchNode batch,
- string[] binNames,
- Operation[] ops,
- BatchAttr attr
- )
- {
- // Estimate full row size
- int[] offsets = batch.offsets;
- int max = batch.offsetsSize;
-
- // Estimate dataBuffer size.
- Begin();
- int fieldCount = 1;
- Expression exp = GetBatchExpression(policy, attr);
-
- if (exp != null)
- {
- dataOffset += exp.Size();
- fieldCount++;
- }
-
- dataOffset += FIELD_HEADER_SIZE + 5;
-
- Key prev = null;
-
- for (int i = 0; i < max; i++)
- {
- Key key = keys[offsets[i]];
-
- dataOffset += key.digest.Length + 4;
-
- // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if (!attr.sendKey && prev != null && prev.ns == key.ns &&
- prev.setName == key.setName)
- {
- // Can set repeat previous namespace/bin names to save space.
- dataOffset++;
- }
- else
- {
- // Write full header and namespace/set/bin names.
- dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
- dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
- dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
-
- if (attr.sendKey)
- {
- dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1;
- }
-
- if (binNames != null)
- {
- foreach (string binName in binNames)
- {
- EstimateOperationSize(binName);
- }
- }
- else if (ops != null)
- {
- foreach (Operation op in ops)
- {
- if (Operation.IsWrite(op.type))
- {
- if (!attr.hasWrite)
- {
- throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read");
- }
- dataOffset += 2; // Extra write specific fields.
- }
- EstimateOperationSize(op);
- }
- }
- else if ((attr.writeAttr & Command.INFO2_DELETE) != 0)
- {
- dataOffset += 2; // Extra write specific fields.
- }
- prev = key;
- }
- }
-
- bool compress = SizeBuffer(policy);
-
- WriteBatchHeader(policy, totalTimeout, fieldCount);
-
- if (exp != null)
- {
- exp.Write(this);
- }
-
- int fieldSizeOffset = dataOffset;
- WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
-
- ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
- dataOffset += 4;
- dataBuffer[dataOffset++] = GetBatchFlags(policy);
- prev = null;
-
- for (int i = 0; i < max; i++)
- {
- int index = offsets[i];
- ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
- dataOffset += 4;
-
- Key key = keys[index];
- byte[] digest = key.digest;
- Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
- dataOffset += digest.Length;
-
- // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if (!attr.sendKey && prev != null && prev.ns == key.ns &&
- prev.setName == key.setName)
- {
- // Can set repeat previous namespace/bin names to save space.
- dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
- }
- else
- {
- // Write full message.
- if (binNames != null)
- {
- WriteBatchBinNames(key, binNames, attr, null);
- }
- else if (ops != null)
- {
- WriteBatchOperations(key, ops, attr, null);
- }
- else if ((attr.writeAttr & Command.INFO2_DELETE) != 0)
- {
- WriteBatchWrite(key, attr, null, 0, 0);
- }
- else
- {
- WriteBatchRead(key, attr, null, 0);
- }
- prev = key;
- }
- }
-
- // Write real field size.
- ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
- End(compress);
- }
-
- public virtual void SetBatchUDF
- (
- BatchPolicy policy,
- Key[] keys,
- BatchNode batch,
- string packageName,
- string functionName,
- byte[] argBytes,
- BatchAttr attr
- )
- {
- // Estimate full row size
- int[] offsets = batch.offsets;
- int max = batch.offsetsSize;
-
- // Estimate dataBuffer size.
- Begin();
- int fieldCount = 1;
- Expression exp = GetBatchExpression(policy, attr);
-
- if (exp != null)
- {
- dataOffset += exp.Size();
- fieldCount++;
- }
-
- dataOffset += FIELD_HEADER_SIZE + 5;
-
- Key prev = null;
-
- for (int i = 0; i < max; i++)
- {
- Key key = keys[offsets[i]];
-
- dataOffset += key.digest.Length + 4;
-
- // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if (!attr.sendKey && prev != null && prev.ns == key.ns &&
- prev.setName == key.setName)
- {
- // Can set repeat previous namespace/bin names to save space.
- dataOffset++;
- }
- else
- {
- // Write full header and namespace/set/bin names.
- dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
- dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
- dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
-
- if (attr.sendKey)
- {
- dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1;
- }
- dataOffset += 2; // gen(2) = 6
- EstimateUdfSize(packageName, functionName, argBytes);
- prev = key;
- }
- }
-
- bool compress = SizeBuffer(policy);
-
- WriteBatchHeader(policy, totalTimeout, fieldCount);
-
- if (exp != null)
- {
- exp.Write(this);
- }
-
- int fieldSizeOffset = dataOffset;
- WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
-
- ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
- dataOffset += 4;
- dataBuffer[dataOffset++] = GetBatchFlags(policy);
- prev = null;
-
- for (int i = 0; i < max; i++)
- {
- int index = offsets[i];
- ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
- dataOffset += 4;
-
- Key key = keys[index];
- byte[] digest = key.digest;
- Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
- dataOffset += digest.Length;
-
- // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if (!attr.sendKey && prev != null && prev.ns == key.ns &&
- prev.setName == key.setName)
- {
- // Can set repeat previous namespace/bin names to save space.
- dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
- }
- else
- {
- // Write full message.
- WriteBatchWrite(key, attr, null, 3, 0);
- WriteField(packageName, FieldType.UDF_PACKAGE_NAME);
- WriteField(functionName, FieldType.UDF_FUNCTION);
- WriteField(argBytes, FieldType.UDF_ARGLIST);
- prev = key;
- }
- }
-
- // Write real field size.
- ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
- End(compress);
- }
-
- private static Expression GetBatchExpression(Policy policy, BatchAttr attr)
- {
- return (attr.filterExp != null) ? attr.filterExp : policy.filterExp;
- }
-
- private static byte GetBatchFlags(BatchPolicy policy)
- {
- byte flags = 0x8;
-
- if (policy.allowInline)
- {
- flags |= 0x1;
- }
-
- if (policy.allowInlineSSD)
- {
- flags |= 0x2;
- }
-
- if (policy.respondAllKeys)
- {
- flags |= 0x4;
- }
- return flags;
- }
-
- private void WriteBatchHeader(Policy policy, int timeout, int fieldCount)
- {
- int readAttr = Command.INFO1_BATCH;
-
- if (policy.compress)
- {
- readAttr |= Command.INFO1_COMPRESS_RESPONSE;
- }
-
- // Write all header data except total size which must be written last.
- dataOffset += 8;
- dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
- dataBuffer[dataOffset++] = (byte)readAttr;
-
- Array.Clear(dataBuffer, dataOffset, 12);
- dataOffset += 12;
-
- dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset);
- }
-
- private void WriteBatchBinNames(Key key, string[] binNames, BatchAttr attr, Expression filter)
- {
- WriteBatchRead(key, attr, filter, binNames.Length);
-
- foreach (string binName in binNames)
- {
- WriteOperation(binName, Operation.Type.READ);
- }
- }
-
- private void WriteBatchOperations(Key key, Operation[] ops, BatchAttr attr, Expression filter)
- {
- if (attr.hasWrite)
- {
- WriteBatchWrite(key, attr, filter, 0, ops.Length);
- }
- else
- {
- WriteBatchRead(key, attr, filter, ops.Length);
- }
-
- foreach (Operation op in ops)
- {
- WriteOperation(op);
- }
- }
-
- private void WriteBatchRead(Key key, BatchAttr attr, Expression filter, int opCount)
- {
- dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL);
- dataBuffer[dataOffset++] = (byte)attr.readAttr;
- dataBuffer[dataOffset++] = (byte)attr.writeAttr;
- dataBuffer[dataOffset++] = (byte)attr.infoAttr;
- dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
- WriteBatchFields(key, filter, 0, opCount);
- }
-
- private void WriteBatchWrite(Key key, BatchAttr attr, Expression filter, int fieldCount, int opCount)
- {
- dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL);
- dataBuffer[dataOffset++] = (byte)attr.readAttr;
- dataBuffer[dataOffset++] = (byte)attr.writeAttr;
- dataBuffer[dataOffset++] = (byte)attr.infoAttr;
- dataOffset += ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset);
- dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
-
- if (attr.sendKey)
- {
- fieldCount++;
- WriteBatchFields(key, filter, fieldCount, opCount);
- WriteField(key.userKey, FieldType.KEY);
- }
- else
- {
- WriteBatchFields(key, filter, fieldCount, opCount);
- }
- }
-
- private void WriteBatchFields(Key key, Expression filter, int fieldCount, int opCount)
- {
- if (filter != null)
- {
- fieldCount++;
- WriteBatchFields(key, fieldCount, opCount);
- filter.Write(this);
- }
- else
- {
- WriteBatchFields(key, fieldCount, opCount);
- }
- }
-
- private void WriteBatchFields(Key key, int fieldCount, int opCount)
- {
- fieldCount += 2;
- dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset);
- WriteField(key.ns, FieldType.NAMESPACE);
- WriteField(key.setName, FieldType.TABLE);
- }
-
- //--------------------------------------------------
- // Scan
- //--------------------------------------------------
-
- public virtual void SetScan
- (
- Cluster cluster,
- ScanPolicy policy,
- string ns,
- string setName,
- string[] binNames,
- ulong taskId,
- NodePartitions nodePartitions
- )
- {
- Begin();
- int fieldCount = 0;
- int partsFullSize = nodePartitions.partsFull.Count * 2;
- int partsPartialSize = nodePartitions.partsPartial.Count * 20;
- long maxRecords = nodePartitions.recordMax;
-
- if (ns != null)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(ns) + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (setName != null)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(setName) + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (partsFullSize > 0)
- {
- dataOffset += partsFullSize + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (partsPartialSize > 0)
- {
- dataOffset += partsPartialSize + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (maxRecords > 0)
- {
- dataOffset += 8 + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (policy.recordsPerSecond > 0)
- {
- dataOffset += 4 + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
-
- // Estimate scan timeout size.
- dataOffset += 4 + FIELD_HEADER_SIZE;
- fieldCount++;
-
- // Estimate taskId size.
- dataOffset += 8 + FIELD_HEADER_SIZE;
- fieldCount++;
-
- if (binNames != null)
- {
- foreach (string binName in binNames)
- {
- EstimateOperationSize(binName);
- }
- }
-
- SizeBuffer();
- int readAttr = Command.INFO1_READ;
-
- if (!policy.includeBinData)
- {
- readAttr |= Command.INFO1_NOBINDATA;
- }
-
- // Clusters that support partition queries also support not sending partition done messages.
- int operationCount = (binNames == null) ? 0 : binNames.Length;
- WriteHeaderRead(policy, totalTimeout, readAttr, 0, Command.INFO3_PARTITION_DONE, fieldCount, operationCount);
-
- if (ns != null)
- {
- WriteField(ns, FieldType.NAMESPACE);
- }
-
- if (setName != null)
- {
- WriteField(setName, FieldType.TABLE);
- }
-
- if (partsFullSize > 0)
- {
- WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY);
-
- foreach (PartitionStatus part in nodePartitions.partsFull)
- {
- ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset);
- dataOffset += 2;
- }
- }
-
- if (partsPartialSize > 0)
- {
- WriteFieldHeader(partsPartialSize, FieldType.DIGEST_ARRAY);
-
- foreach (PartitionStatus part in nodePartitions.partsPartial) {
- Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20);
- dataOffset += 20;
- }
- }
-
- if (maxRecords > 0)
- {
- WriteField((ulong)maxRecords, FieldType.MAX_RECORDS);
- }
-
- if (policy.recordsPerSecond > 0)
- {
- WriteField(policy.recordsPerSecond, FieldType.RECORDS_PER_SECOND);
- }
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
-
- // Write scan timeout
- WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT);
-
- // Write taskId field
- WriteField(taskId, FieldType.TRAN_ID);
-
- if (binNames != null)
- {
- foreach (string binName in binNames)
- {
- WriteOperation(binName, Operation.Type.READ);
- }
- }
- End();
- }
-
- //--------------------------------------------------
- // Query
- //--------------------------------------------------
-
- protected virtual internal void SetQuery
- (
- Cluster cluster,
- Policy policy,
- Statement statement,
- ulong taskId,
- bool background,
- NodePartitions nodePartitions
- )
- {
- byte[] functionArgBuffer = null;
- int fieldCount = 0;
- int filterSize = 0;
- int binNameSize = 0;
- bool isNew = cluster.hasPartitionQuery;
-
- Begin();
-
- if (statement.ns != null)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(statement.ns) + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (statement.setName != null)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(statement.setName) + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- // Estimate recordsPerSecond field size. This field is used in new servers and not used
- // (but harmless to add) in old servers.
- if (statement.recordsPerSecond > 0)
- {
- dataOffset += 4 + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- // Estimate socket timeout field size. This field is used in new servers and not used
- // (but harmless to add) in old servers.
- dataOffset += 4 + FIELD_HEADER_SIZE;
- fieldCount++;
-
- // Estimate taskId field.
- dataOffset += 8 + FIELD_HEADER_SIZE;
- fieldCount++;
-
- byte[] packedCtx = null;
-
- if (statement.filter != null)
- {
- IndexCollectionType type = statement.filter.CollectionType;
-
- // Estimate INDEX_TYPE field.
- if (type != IndexCollectionType.DEFAULT)
- {
- dataOffset += FIELD_HEADER_SIZE + 1;
- fieldCount++;
- }
-
- // Estimate INDEX_RANGE field.
- dataOffset += FIELD_HEADER_SIZE;
- filterSize++; // num filters
- filterSize += statement.filter.EstimateSize();
- dataOffset += filterSize;
- fieldCount++;
-
- if (!isNew)
- {
- // Query bin names are specified as a field (Scan bin names are specified later as operations)
- // in old servers. Estimate size for selected bin names.
- if (statement.binNames != null && statement.binNames.Length > 0)
- {
- dataOffset += FIELD_HEADER_SIZE;
- binNameSize++; // num bin names
-
- foreach (string binName in statement.binNames)
- {
- binNameSize += ByteUtil.EstimateSizeUtf8(binName) + 1;
- }
- dataOffset += binNameSize;
- fieldCount++;
- }
- }
-
- packedCtx = statement.filter.PackedCtx;
-
- if (packedCtx != null)
- {
- dataOffset += FIELD_HEADER_SIZE + packedCtx.Length;
- fieldCount++;
- }
- }
-
- // Estimate aggregation/background function size.
- if (statement.functionName != null)
- {
- dataOffset += FIELD_HEADER_SIZE + 1; // udf type
- dataOffset += ByteUtil.EstimateSizeUtf8(statement.packageName) + FIELD_HEADER_SIZE;
- dataOffset += ByteUtil.EstimateSizeUtf8(statement.functionName) + FIELD_HEADER_SIZE;
-
- if (statement.functionArgs.Length > 0)
- {
- functionArgBuffer = Packer.Pack(statement.functionArgs);
- }
- else
- {
- functionArgBuffer = new byte[0];
- }
- dataOffset += FIELD_HEADER_SIZE + functionArgBuffer.Length;
- fieldCount += 4;
- }
-
- if (policy.filterExp != null)
- {
- dataOffset += policy.filterExp.Size();
- fieldCount++;
- }
-
- long maxRecords = 0;
- int partsFullSize = 0;
- int partsPartialDigestSize = 0;
- int partsPartialBValSize = 0;
-
- if (nodePartitions != null)
- {
- partsFullSize = nodePartitions.partsFull.Count * 2;
- partsPartialDigestSize = nodePartitions.partsPartial.Count * 20;
-
- if (statement.filter != null)
- {
- partsPartialBValSize = nodePartitions.partsPartial.Count * 8;
- }
- maxRecords = nodePartitions.recordMax;
- }
-
- if (partsFullSize > 0)
- {
- dataOffset += partsFullSize + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (partsPartialDigestSize > 0)
- {
- dataOffset += partsPartialDigestSize + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (partsPartialBValSize > 0)
- {
- dataOffset += partsPartialBValSize + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- // Estimate max records field size. This field is used in new servers and not used
- // (but harmless to add) in old servers.
- if (maxRecords > 0)
- {
- dataOffset += 8 + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- // Operations (used in query execute) and bin names (used in scan/query) are mutually exclusive.
- int operationCount = 0;
-
- if (statement.operations != null)
- {
- // Estimate size for background operations.
- if (!background)
- {
- throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Operations not allowed in foreground query");
- }
-
- foreach (Operation operation in statement.operations)
- {
- if (!Operation.IsWrite(operation.type))
- {
- throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Read operations not allowed in background query");
- }
- EstimateOperationSize(operation);
- }
- operationCount = statement.operations.Length;
- }
- else if (statement.binNames != null && (isNew || statement.filter == null))
- {
- // Estimate size for selected bin names (query bin names already handled for old servers).
- foreach (string binName in statement.binNames)
- {
- EstimateOperationSize(binName);
- }
- operationCount = statement.binNames.Length;
- }
-
- SizeBuffer();
-
- if (background)
- {
- WriteHeaderWrite((WritePolicy)policy, Command.INFO2_WRITE, fieldCount, operationCount);
- }
- else
- {
- QueryPolicy qp = (QueryPolicy)policy;
- int readAttr = Command.INFO1_READ;
- int writeAttr = 0;
-
- if (!qp.includeBinData)
- {
- readAttr |= Command.INFO1_NOBINDATA;
- }
-
- if (qp.shortQuery || qp.expectedDuration == QueryDuration.SHORT)
- {
- readAttr |= Command.INFO1_SHORT_QUERY;
- }
- else if (qp.expectedDuration == QueryDuration.LONG_RELAX_AP)
- {
- writeAttr |= Command.INFO2_RELAX_AP_LONG_QUERY;
- }
-
- int infoAttr = (isNew || statement.filter == null) ? Command.INFO3_PARTITION_DONE : 0;
-
- WriteHeaderRead(policy, totalTimeout, readAttr, writeAttr, infoAttr, fieldCount, operationCount);
- }
-
- if (statement.ns != null)
- {
- WriteField(statement.ns, FieldType.NAMESPACE);
- }
-
- if (statement.setName != null)
- {
- WriteField(statement.setName, FieldType.TABLE);
- }
-
- // Write records per second.
- if (statement.recordsPerSecond > 0)
- {
- WriteField(statement.recordsPerSecond, FieldType.RECORDS_PER_SECOND);
- }
-
- // Write socket idle timeout.
- WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT);
-
- // Write taskId field
- WriteField(taskId, FieldType.TRAN_ID);
-
- if (statement.filter != null)
- {
- IndexCollectionType type = statement.filter.CollectionType;
-
- if (type != IndexCollectionType.DEFAULT)
- {
- WriteFieldHeader(1, FieldType.INDEX_TYPE);
- dataBuffer[dataOffset++] = (byte)type;
- }
-
- WriteFieldHeader(filterSize, FieldType.INDEX_RANGE);
- dataBuffer[dataOffset++] = (byte)1;
- dataOffset = statement.filter.Write(dataBuffer, dataOffset);
-
- if (!isNew)
- {
- // Query bin names are specified as a field (Scan bin names are specified later as operations)
- // in old servers.
- if (statement.binNames != null && statement.binNames.Length > 0)
- {
- WriteFieldHeader(binNameSize, FieldType.QUERY_BINLIST);
- dataBuffer[dataOffset++] = (byte)statement.binNames.Length;
-
- foreach (string binName in statement.binNames)
- {
- int len = ByteUtil.StringToUtf8(binName, dataBuffer, dataOffset + 1);
- dataBuffer[dataOffset] = (byte)len;
- dataOffset += len + 1;
- }
- }
- }
-
- if (packedCtx != null)
- {
- WriteFieldHeader(packedCtx.Length, FieldType.INDEX_CONTEXT);
- Array.Copy(packedCtx, 0, dataBuffer, dataOffset, packedCtx.Length);
- dataOffset += packedCtx.Length;
- }
- }
-
- if (statement.functionName != null)
- {
- WriteFieldHeader(1, FieldType.UDF_OP);
- dataBuffer[dataOffset++] = background ? (byte)2 : (byte)1;
- WriteField(statement.packageName, FieldType.UDF_PACKAGE_NAME);
- WriteField(statement.functionName, FieldType.UDF_FUNCTION);
- WriteField(functionArgBuffer, FieldType.UDF_ARGLIST);
- }
-
- if (policy.filterExp != null)
- {
- policy.filterExp.Write(this);
- }
-
- if (partsFullSize > 0)
- {
- WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY);
-
- foreach (PartitionStatus part in nodePartitions.partsFull)
- {
- ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset);
- dataOffset += 2;
- }
- }
-
- if (partsPartialDigestSize > 0)
- {
- WriteFieldHeader(partsPartialDigestSize, FieldType.DIGEST_ARRAY);
-
- foreach (PartitionStatus part in nodePartitions.partsPartial)
- {
- Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20);
- dataOffset += 20;
- }
- }
-
- if (partsPartialBValSize > 0)
- {
- WriteFieldHeader(partsPartialBValSize, FieldType.BVAL_ARRAY);
-
- foreach (PartitionStatus part in nodePartitions.partsPartial)
- {
- ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset);
- dataOffset += 8;
- }
- }
-
- if (maxRecords > 0)
- {
- WriteField((ulong)maxRecords, FieldType.MAX_RECORDS);
- }
-
- if (statement.operations != null)
- {
- foreach (Operation operation in statement.operations)
- {
- WriteOperation(operation);
- }
- }
- else if (statement.binNames != null && (isNew || statement.filter == null))
- {
- foreach (string binName in statement.binNames)
- {
- WriteOperation(binName, Operation.Type.READ);
- }
- }
- End();
- }
-
- //--------------------------------------------------
- // Command Sizing
- //--------------------------------------------------
-
- private int EstimateKeySize(Policy policy, Key key)
- {
- int fieldCount = 0;
-
- if (key.ns != null)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- if (key.setName != null)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
- fieldCount++;
- }
-
- dataOffset += key.digest.Length + FIELD_HEADER_SIZE;
- fieldCount++;
-
- if (policy.sendKey)
- {
- dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1;
- fieldCount++;
- }
- return fieldCount;
- }
-
- private int EstimateUdfSize(string packageName, string functionName, byte[] bytes)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(packageName) + FIELD_HEADER_SIZE;
- dataOffset += ByteUtil.EstimateSizeUtf8(functionName) + FIELD_HEADER_SIZE;
- dataOffset += bytes.Length + FIELD_HEADER_SIZE;
- return 3;
- }
-
- private void EstimateOperationSize(Bin bin)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(bin.name) + OPERATION_HEADER_SIZE;
- dataOffset += bin.value.EstimateSize();
- }
-
- private void EstimateOperationSize(Operation operation)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE;
- dataOffset += operation.value.EstimateSize();
- }
-
- private void EstimateReadOperationSize(Operation operation)
- {
- if (Operation.IsWrite(operation.type))
- {
- throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read");
- }
- dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE;
- dataOffset += operation.value.EstimateSize();
- }
-
- private void EstimateOperationSize(string binName)
- {
- dataOffset += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE;
- }
-
- private void EstimateOperationSize()
- {
- dataOffset += OPERATION_HEADER_SIZE;
- }
-
- //--------------------------------------------------
- // Command Writes
- //--------------------------------------------------
-
- /// <summary>
- /// Header write for write commands.
- /// </summary>
- private void WriteHeaderWrite(WritePolicy policy, int writeAttr, int fieldCount, int operationCount)
- {
- // Set flags.
- int generation = 0;
- int infoAttr = 0;
-
- switch (policy.recordExistsAction)
- {
- case RecordExistsAction.UPDATE:
- break;
- case RecordExistsAction.UPDATE_ONLY:
- infoAttr |= Command.INFO3_UPDATE_ONLY;
- break;
- case RecordExistsAction.REPLACE:
- infoAttr |= Command.INFO3_CREATE_OR_REPLACE;
- break;
- case RecordExistsAction.REPLACE_ONLY:
- infoAttr |= Command.INFO3_REPLACE_ONLY;
- break;
- case RecordExistsAction.CREATE_ONLY:
- writeAttr |= Command.INFO2_CREATE_ONLY;
- break;
- }
-
- switch (policy.generationPolicy)
- {
- case GenerationPolicy.NONE:
- break;
- case GenerationPolicy.EXPECT_GEN_EQUAL:
- generation = policy.generation;
- writeAttr |= Command.INFO2_GENERATION;
- break;
- case GenerationPolicy.EXPECT_GEN_GT:
- generation = policy.generation;
- writeAttr |= Command.INFO2_GENERATION_GT;
- break;
- }
-
- if (policy.commitLevel == CommitLevel.COMMIT_MASTER)
- {
- infoAttr |= Command.INFO3_COMMIT_MASTER;
- }
-
- if (policy.durableDelete)
- {
- writeAttr |= Command.INFO2_DURABLE_DELETE;
- }
-
- dataOffset += 8;
-
- // Write all header data except total size which must be written last.
- dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
- dataBuffer[dataOffset++] = (byte)0;
- dataBuffer[dataOffset++] = (byte)writeAttr;
- dataBuffer[dataOffset++] = (byte)infoAttr;
- dataBuffer[dataOffset++] = 0; // unused
- dataBuffer[dataOffset++] = 0; // clear the result code
- dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset);
- dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset);
- dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
- }
-
- /// <summary>
- /// Header write for operate command.
- /// </summary>
- private void WriteHeaderReadWrite
- (
- WritePolicy policy,
- OperateArgs args,
- int fieldCount
- )
- {
- // Set flags.
- int generation = 0;
- int ttl = args.hasWrite ? policy.expiration : policy.readTouchTtlPercent;
- int readAttr = args.readAttr;
- int writeAttr = args.writeAttr;
- int infoAttr = 0;
- int operationCount = args.operations.Length;
-
- switch (policy.recordExistsAction)
- {
- case RecordExistsAction.UPDATE:
- break;
- case RecordExistsAction.UPDATE_ONLY:
- infoAttr |= Command.INFO3_UPDATE_ONLY;
- break;
- case RecordExistsAction.REPLACE:
- infoAttr |= Command.INFO3_CREATE_OR_REPLACE;
- break;
- case RecordExistsAction.REPLACE_ONLY:
- infoAttr |= Command.INFO3_REPLACE_ONLY;
- break;
- case RecordExistsAction.CREATE_ONLY:
- writeAttr |= Command.INFO2_CREATE_ONLY;
- break;
- }
-
- switch (policy.generationPolicy)
- {
- case GenerationPolicy.NONE:
- break;
- case GenerationPolicy.EXPECT_GEN_EQUAL:
- generation = policy.generation;
- writeAttr |= Command.INFO2_GENERATION;
- break;
- case GenerationPolicy.EXPECT_GEN_GT:
- generation = policy.generation;
- writeAttr |= Command.INFO2_GENERATION_GT;
- break;
- }
-
- if (policy.commitLevel == CommitLevel.COMMIT_MASTER)
- {
- infoAttr |= Command.INFO3_COMMIT_MASTER;
- }
-
- if (policy.durableDelete)
- {
- writeAttr |= Command.INFO2_DURABLE_DELETE;
- }
-
- switch (policy.readModeSC)
- {
- case ReadModeSC.SESSION:
- break;
- case ReadModeSC.LINEARIZE:
- infoAttr |= Command.INFO3_SC_READ_TYPE;
- break;
- case ReadModeSC.ALLOW_REPLICA:
- infoAttr |= Command.INFO3_SC_READ_RELAX;
- break;
- case ReadModeSC.ALLOW_UNAVAILABLE:
- infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX;
- break;
- }
-
- if (policy.readModeAP == ReadModeAP.ALL)
- {
- readAttr |= Command.INFO1_READ_MODE_AP_ALL;
- }
-
- if (policy.compress)
- {
- readAttr |= Command.INFO1_COMPRESS_RESPONSE;
- }
-
- dataOffset += 8;
-
- // Write all header data except total size which must be written last.
- dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
- dataBuffer[dataOffset++] = (byte)readAttr;
- dataBuffer[dataOffset++] = (byte)writeAttr;
- dataBuffer[dataOffset++] = (byte)infoAttr;
- dataBuffer[dataOffset++] = 0; // unused
- dataBuffer[dataOffset++] = 0; // clear the result code
- dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset);
- dataOffset += ByteUtil.IntToBytes((uint)ttl, dataBuffer, dataOffset);
- dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
- }
-
- /// <summary>
- /// Header write for read commands.
- /// </summary>
- private void WriteHeaderRead
- (
- Policy policy,
- int timeout,
- int readAttr,
- int writeAttr,
- int infoAttr,
- int fieldCount,
- int operationCount
- )
- {
- switch (policy.readModeSC)
- {
- case ReadModeSC.SESSION:
- break;
- case ReadModeSC.LINEARIZE:
- infoAttr |= Command.INFO3_SC_READ_TYPE;
- break;
- case ReadModeSC.ALLOW_REPLICA:
- infoAttr |= Command.INFO3_SC_READ_RELAX;
- break;
- case ReadModeSC.ALLOW_UNAVAILABLE:
- infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX;
- break;
- }
-
- if (policy.readModeAP == ReadModeAP.ALL)
- {
- readAttr |= Command.INFO1_READ_MODE_AP_ALL;
- }
-
- if (policy.compress)
- {
- readAttr |= Command.INFO1_COMPRESS_RESPONSE;
- }
-
- dataOffset += 8;
-
- // Write all header data except total size which must be written last.
- dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
- dataBuffer[dataOffset++] = (byte)readAttr;
- dataBuffer[dataOffset++] = (byte)writeAttr;
- dataBuffer[dataOffset++] = (byte)infoAttr;
-
- for (int i = 0; i < 6; i++)
- {
- dataBuffer[dataOffset++] = 0;
- }
- dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset);
- dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
- }
-
- /// <summary>
- /// Header write for read header commands.
- /// </summary>
- private void WriteHeaderReadHeader(Policy policy, int readAttr, int fieldCount, int operationCount)
- {
- int infoAttr = 0;
-
- switch (policy.readModeSC)
- {
- case ReadModeSC.SESSION:
- break;
- case ReadModeSC.LINEARIZE:
- infoAttr |= Command.INFO3_SC_READ_TYPE;
- break;
- case ReadModeSC.ALLOW_REPLICA:
- infoAttr |= Command.INFO3_SC_READ_RELAX;
- break;
- case ReadModeSC.ALLOW_UNAVAILABLE:
- infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX;
- break;
- }
-
- if (policy.readModeAP == ReadModeAP.ALL)
- {
- readAttr |= Command.INFO1_READ_MODE_AP_ALL;
- }
-
- dataOffset += 8;
-
- // Write all header data except total size which must be written last.
- dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
- dataBuffer[dataOffset++] = (byte)readAttr;
- dataBuffer[dataOffset++] = (byte)0;
- dataBuffer[dataOffset++] = (byte)infoAttr;
-
- for (int i = 0; i < 6; i++)
- {
- dataBuffer[dataOffset++] = 0;
- }
- dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset);
- dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
- dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
- }
-
- private void WriteKey(Policy policy, Key key)
- {
- // Write key into dataBuffer.
- if (key.ns != null)
- {
- WriteField(key.ns, FieldType.NAMESPACE);
- }
-
- if (key.setName != null)
- {
- WriteField(key.setName, FieldType.TABLE);
- }
-
- WriteField(key.digest, FieldType.DIGEST_RIPE);
-
- if (policy.sendKey)
- {
- WriteField(key.userKey, FieldType.KEY);
- }
- }
-
- private int WriteReadOnlyOperations(Operation[] ops, int readAttr)
- {
- bool readBin = false;
- bool readHeader = false;
-
- foreach (Operation op in ops)
- {
- switch (op.type)
- {
- case Operation.Type.READ:
- // Read all bins if no bin is specified.
- if (op.binName == null)
- {
- readAttr |= Command.INFO1_GET_ALL;
- }
- readBin = true;
- break;
-
- case Operation.Type.READ_HEADER:
- readHeader = true;
- break;
-
- default:
- break;
- }
- WriteOperation(op);
- }
-
- if (readHeader && !readBin)
- {
- readAttr |= Command.INFO1_NOBINDATA;
- }
- return readAttr;
- }
-
- private void WriteOperation(Bin bin, Operation.Type operationType)
- {
- int nameLength = ByteUtil.StringToUtf8(bin.name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE);
- int valueLength = bin.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength);
-
- ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset);
- dataOffset += 4;
- dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType);
- dataBuffer[dataOffset++] = (byte) bin.value.Type;
- dataBuffer[dataOffset++] = (byte) 0;
- dataBuffer[dataOffset++] = (byte) nameLength;
- dataOffset += nameLength + valueLength;
- }
-
- private void WriteOperation(Operation operation)
- {
- int nameLength = ByteUtil.StringToUtf8(operation.binName, dataBuffer, dataOffset + OPERATION_HEADER_SIZE);
- int valueLength = operation.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength);
-
- ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset);
- dataOffset += 4;
- dataBuffer[dataOffset++] = Operation.GetProtocolType(operation.type);
- dataBuffer[dataOffset++] = (byte) operation.value.Type;
- dataBuffer[dataOffset++] = (byte) 0;
- dataBuffer[dataOffset++] = (byte) nameLength;
- dataOffset += nameLength + valueLength;
- }
-
- private void WriteOperation(string name, Operation.Type operationType)
- {
- int nameLength = ByteUtil.StringToUtf8(name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE);
-
- ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset);
- dataOffset += 4;
- dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType);
- dataBuffer[dataOffset++] = (byte) 0;
- dataBuffer[dataOffset++] = (byte) 0;
- dataBuffer[dataOffset++] = (byte) nameLength;
- dataOffset += nameLength;
- }
-
- private void WriteOperation(Operation.Type operationType)
- {
- ByteUtil.IntToBytes(4, dataBuffer, dataOffset);
- dataOffset += 4;
- dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType);
- dataBuffer[dataOffset++] = 0;
- dataBuffer[dataOffset++] = 0;
- dataBuffer[dataOffset++] = 0;
- }
-
- private void WriteField(Value value, int type)
- {
- int offset = dataOffset + FIELD_HEADER_SIZE;
- dataBuffer[offset++] = (byte)value.Type;
- int len = value.Write(dataBuffer, offset) + 1;
- WriteFieldHeader(len, type);
- dataOffset += len;
- }
-
- private void WriteField(string str, int type)
- {
- int len = ByteUtil.StringToUtf8(str, dataBuffer, dataOffset + FIELD_HEADER_SIZE);
- WriteFieldHeader(len, type);
- dataOffset += len;
- }
-
- private void WriteField(byte[] bytes, int type)
- {
- Array.Copy(bytes, 0, dataBuffer, dataOffset + FIELD_HEADER_SIZE, bytes.Length);
- WriteFieldHeader(bytes.Length, type);
- dataOffset += bytes.Length;
- }
-
- private void WriteField(int val, int type)
- {
- WriteFieldHeader(4, type);
- dataOffset += ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset);
- }
-
- private void WriteField(ulong val, int type)
- {
- WriteFieldHeader(8, type);
- dataOffset += ByteUtil.LongToBytes(val, dataBuffer, dataOffset);
- }
-
- private void WriteFieldHeader(int size, int type)
- {
- dataOffset += ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset);
- dataBuffer[dataOffset++] = (byte)type;
- }
-
- internal virtual void WriteExpHeader(int size)
- {
- WriteFieldHeader(size, FieldType.FILTER_EXP);
- }
-
- private void Begin()
- {
- dataOffset = MSG_TOTAL_HEADER_SIZE;
- }
-
- private bool SizeBuffer(Policy policy)
- {
- if (policy.compress && dataOffset > COMPRESS_THRESHOLD)
- {
- // Command will be compressed. First, write uncompressed command
- // into separate dataBuffer. Save normal dataBuffer for compressed command.
- // Normal dataBuffer in async mode is from dataBuffer pool that is used to
- // minimize memory pinning during socket operations.
- dataBuffer = new byte[dataOffset];
- dataOffset = 0;
- return true;
- }
- else
- {
- // Command will be uncompressed.
- SizeBuffer();
- return false;
- }
- }
-
- private void End(bool compress)
- {
- if (!compress)
- {
- End();
- return;
- }
-
- // Write proto header.
- ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48);
- ByteUtil.LongToBytes(size, dataBuffer, 0);
-
- byte[] srcBuf = dataBuffer;
- int srcSize = dataOffset;
-
- // Increase requested dataBuffer size in case compressed dataBuffer size is
- // greater than the uncompressed dataBuffer size.
- dataOffset += 16 + 100;
-
- // This method finds a dataBuffer of the requested size, resets dataOffset to the
- // segment offset, and returns the dataBuffer's max size.
- int trgBufSize = SizeBuffer();
-
- // Compress to target starting at new dataOffset plus new header.
- int trgSize = ByteUtil.Compress(srcBuf, srcSize, dataBuffer, dataOffset + 16, trgBufSize - 16) + 16;
-
- ulong proto = ((ulong)trgSize - 8) | (CL_MSG_VERSION << 56) | (MSG_TYPE_COMPRESSED << 48);
- ByteUtil.LongToBytes(proto, dataBuffer, dataOffset);
- ByteUtil.LongToBytes((ulong)srcSize, dataBuffer, dataOffset + 8);
- SetLength(trgSize);
- }
-
- protected internal abstract int SizeBuffer();
- protected internal abstract void End();
- protected internal abstract void SetLength(int length);
-
- //--------------------------------------------------
- // Response Parsing
- //--------------------------------------------------
-
- internal virtual void SkipKey(int fieldCount)
- {
- // There can be fields in the response (setname etc).
- // But for now, ignore them. Expose them to the API if needed in the future.
- for (int i = 0; i < fieldCount; i++)
- {
- int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset);
- dataOffset += 4 + fieldlen;
- }
- }
-
- internal virtual Key ParseKey(int fieldCount, out ulong bval)
- {
- byte[] digest = null;
- string ns = null;
- string setName = null;
- Value userKey = null;
- bval = 0;
-
- for (int i = 0; i < fieldCount; i++)
- {
- int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset);
- dataOffset += 4;
-
- int fieldtype = dataBuffer[dataOffset++];
- int size = fieldlen - 1;
-
- switch (fieldtype)
- {
- case FieldType.DIGEST_RIPE:
- digest = new byte[size];
- Array.Copy(dataBuffer, dataOffset, digest, 0, size);
- break;
-
- case FieldType.NAMESPACE:
- ns = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size);
- break;
-
- case FieldType.TABLE:
- setName = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size);
- break;
-
- case FieldType.KEY:
- int type = dataBuffer[dataOffset++];
- size--;
- userKey = ByteUtil.BytesToKeyValue((ParticleType)type, dataBuffer, dataOffset, size);
- break;
-
- case FieldType.BVAL_ARRAY:
- bval = (ulong)ByteUtil.LittleBytesToLong(dataBuffer, dataOffset);
- break;
- }
- dataOffset += size;
- }
- return new Key(ns, digest, setName, userKey);
- }
-
- public static bool BatchInDoubt(bool isWrite, int commandSentCounter)
- {
- return isWrite && commandSentCounter > 1;
- }
- }
-}
-#pragma warning restore 0618
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+using System.Collections;
+
+#pragma warning disable 0618
+
+namespace Aerospike.Client
+{
+ public abstract class Command
+ {
+ public static readonly int INFO1_READ = (1 << 0); // Contains a read operation.
+ public static readonly int INFO1_GET_ALL = (1 << 1); // Get all bins.
+ public static readonly int INFO1_SHORT_QUERY = (1 << 2); // Short query.
+ public static readonly int INFO1_BATCH = (1 << 3); // Batch read or exists.
+ public static readonly int INFO1_NOBINDATA = (1 << 5); // Do not read the bins.
+ public static readonly int INFO1_READ_MODE_AP_ALL = (1 << 6); // Involve all replicas in read operation.
+ public static readonly int INFO1_COMPRESS_RESPONSE = (1 << 7); // Tell server to compress its response.
+
+ public static readonly int INFO2_WRITE = (1 << 0); // Create or update record
+ public static readonly int INFO2_DELETE = (1 << 1); // Fling a record into the belly of Moloch.
+ public static readonly int INFO2_GENERATION = (1 << 2); // Update if expected generation == old.
+ public static readonly int INFO2_GENERATION_GT = (1 << 3); // Update if new generation >= old, good for restore.
+ public static readonly int INFO2_DURABLE_DELETE = (1 << 4); // Command resulting in record deletion leaves tombstone (Enterprise only).
+ public static readonly int INFO2_CREATE_ONLY = (1 << 5); // Create only. Fail if record already exists.
+ public static readonly int INFO2_RELAX_AP_LONG_QUERY = (1 << 6); // Treat as a long query, but relax read consistency.
+ public static readonly int INFO2_RESPOND_ALL_OPS = (1 << 7); // Return a result for every operation.
+
+ public static readonly int INFO3_LAST = (1 << 0); // This is the last of a multi-part message.
+ public static readonly int INFO3_COMMIT_MASTER = (1 << 1); // Commit to master only before declaring success.
+ // On send: Do not return partition done in scan/query.
+ // On receive: Specified partition is done in scan/query.
+ public static readonly int INFO3_PARTITION_DONE = (1 << 2);
+ public static readonly int INFO3_UPDATE_ONLY = (1 << 3); // Update only. Merge bins.
+ public static readonly int INFO3_CREATE_OR_REPLACE = (1 << 4); // Create or completely replace record.
+ public static readonly int INFO3_REPLACE_ONLY = (1 << 5); // Completely replace existing record only.
+ public static readonly int INFO3_SC_READ_TYPE = (1 << 6); // See below.
+ public static readonly int INFO3_SC_READ_RELAX = (1 << 7); // See below.
+
+ // Interpret SC_READ bits in info3.
+ //
+ // RELAX TYPE
+ // strict
+ // ------
+ // 0 0 sequential (default)
+ // 0 1 linearize
+ //
+ // relaxed
+ // -------
+ // 1 0 allow replica
+ // 1 1 allow unavailable
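+ //
+ // Illustration (matching the switch in WriteHeaderRead):
+ //   ReadModeSC.LINEARIZE         -> infoAttr |= INFO3_SC_READ_TYPE
+ //   ReadModeSC.ALLOW_REPLICA     -> infoAttr |= INFO3_SC_READ_RELAX
+ //   ReadModeSC.ALLOW_UNAVAILABLE -> infoAttr |= INFO3_SC_READ_TYPE | INFO3_SC_READ_RELAX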
+
+ public static readonly int INFO4_MRT_VERIFY_READ = (1 << 0); // Send MRT version to the server to be verified.
+ public static readonly int INFO4_MRT_ROLL_FORWARD = (1 << 1); // Roll forward MRT.
+ public static readonly int INFO4_MRT_ROLL_BACK = (1 << 2); // Roll back MRT.
+
+ public const byte STATE_READ_AUTH_HEADER = 1;
+ public const byte STATE_READ_HEADER = 2;
+ public const byte STATE_READ_DETAIL = 3;
+ public const byte STATE_COMPLETE = 4;
+
+ public const byte BATCH_MSG_READ = 0x0;
+ public const byte BATCH_MSG_REPEAT = 0x1;
+ public const byte BATCH_MSG_INFO = 0x2;
+ public const byte BATCH_MSG_GEN = 0x4;
+ public const byte BATCH_MSG_TTL = 0x8;
+ public const byte BATCH_MSG_INFO4 = 0x10;
+
+ public const int MSG_TOTAL_HEADER_SIZE = 30;
+ public const int FIELD_HEADER_SIZE = 5;
+ public const int OPERATION_HEADER_SIZE = 8;
+ public const int MSG_REMAINING_HEADER_SIZE = 22;
+ public const int COMPRESS_THRESHOLD = 128;
+ public const ulong CL_MSG_VERSION = 2UL;
+ public const ulong AS_MSG_TYPE = 3UL;
+ public const ulong MSG_TYPE_COMPRESSED = 4UL;
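+
+ // Sizing notes, derived from the constants above and the header writers below:
+ // MSG_TOTAL_HEADER_SIZE (30) = 8-byte proto header + MSG_REMAINING_HEADER_SIZE (22),
+ // where the 22 bytes are: 1 header length + 1 info1 + 1 info2 + 1 info3 + 1 unused
+ // + 1 result code + 4 generation + 4 expiration + 4 timeout + 2 field count
+ // + 2 operation count. Likewise FIELD_HEADER_SIZE (5) = 4-byte length + 1-byte type,
+ // and OPERATION_HEADER_SIZE (8) = 4-byte size + 1 op type + 1 particle type
+ // + 1 unused + 1 bin name length.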
+
+ internal byte[] dataBuffer;
+ internal int dataOffset;
+ internal readonly int maxRetries;
+ internal readonly int serverTimeout;
+ internal int socketTimeout;
+ internal int totalTimeout;
+ internal long? Version;
+
+ protected int resultCode;
+ protected int generation;
+ protected int expiration;
+ protected int fieldCount;
+ protected int opCount;
+
+ public Command(int socketTimeout, int totalTimeout, int maxRetries)
+ {
+ this.maxRetries = maxRetries;
+ this.totalTimeout = totalTimeout;
+
+ if (totalTimeout > 0)
+ {
+ this.socketTimeout = (socketTimeout < totalTimeout && socketTimeout > 0) ? socketTimeout : totalTimeout;
+ this.serverTimeout = this.socketTimeout;
+ }
+ else
+ {
+ this.socketTimeout = socketTimeout;
+ this.serverTimeout = 0;
+ }
+
+ resultCode = 0;
+ generation = 0;
+ expiration = 0;
+ fieldCount = 0;
+ opCount = 0;
+ }
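+
+ // Worked example of the timeout resolution above: with socketTimeout = 30000 and
+ // totalTimeout = 1000, socketTimeout is capped to 1000 and serverTimeout = 1000.
+ // With totalTimeout = 0, socketTimeout is used as-is and serverTimeout = 0, so no
+ // server-side deadline is requested.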
+
+ //--------------------------------------------------
+ // Multi-record Transactions
+ //--------------------------------------------------
+
+ public void SetTxnAddKeys(WritePolicy policy, Key key, OperateArgs args)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(key);
+ dataOffset += args.size;
+
+ bool compress = SizeBuffer(policy);
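+ // Note: SizeBuffer(policy) returns true only when policy.compress is set and the
+ // estimated size exceeds COMPRESS_THRESHOLD; the command is then written to a
+ // temporary buffer first and End(compress) re-frames it as a compressed proto.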
+
+ dataOffset += 8;
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE;
+ dataBuffer[dataOffset++] = (byte)args.readAttr;
+ dataBuffer[dataOffset++] = (byte)args.writeAttr;
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = 0;
+ dataBuffer[dataOffset++] = 0;
+ dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)args.operations.Length, dataBuffer, dataOffset);
+
+ WriteKey(key);
+
+ foreach (Operation operation in args.operations)
+ {
+ WriteOperation(operation);
+ }
+
+ End(compress);
+ }
+
+ public void SetTxnVerify(Key key, long ver)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(key);
+
+ // Version field (7-byte MRT version plus 5-byte field header).
+ dataOffset += 7 + FIELD_HEADER_SIZE;
+ fieldCount++;
+
+ SizeBuffer();
+ dataOffset += 8;
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE;
+ dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE;
+ dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ;
+ dataBuffer[dataOffset++] = 0;
+ dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset);
+
+ WriteKey(key);
+ WriteFieldVersion(ver);
+ End();
+ }
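+
+ // SetTxnVerify issues a header-only read (INFO1_NOBINDATA) carrying the record
+ // version previously observed by the transaction; INFO4_MRT_VERIFY_READ asks the
+ // server to verify that version, and INFO3_SC_READ_TYPE selects a linearized read.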
+
+ public void SetBatchTxnVerify(
+ BatchPolicy policy,
+ Key[] keys,
+ long?[] versions,
+ BatchNode batch
+ )
+ {
+ BatchOffsetsNative offsets = new(batch);
+ SetBatchTxnVerify(policy, keys, versions, offsets);
+ }
+
+ public void SetBatchTxnVerify(
+ BatchPolicy policy,
+ Key[] keys,
+ long?[] versions,
+ BatchOffsets offsets
+ )
+ {
+ // Estimate buffer size.
+ Begin();
+
+ // Batch field
+ dataOffset += FIELD_HEADER_SIZE + 5;
+
+ Key keyPrev = null;
+ long? verPrev = null;
+ int max = offsets.Size();
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ long? ver = versions[offset];
+
+ dataOffset += key.digest.Length + 4;
+
+ if (CanRepeat(key, keyPrev, ver, verPrev))
+ {
+ // Can set the repeat flag to reuse the previous namespace/bin names and save space.
+ dataOffset++;
+ }
+ else
+ {
+ // Write full header and namespace/set/bin names.
+ dataOffset += 9; // header(4) + info4(1) + fieldCount(2) + opCount(2) = 9
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+
+ if (ver.HasValue)
+ {
+ dataOffset += 7 + FIELD_HEADER_SIZE;
+ }
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ bool compress = SizeBuffer(policy);
+
+ WriteBatchHeader(policy, totalTimeout, 1);
+
+ int fieldSizeOffset = dataOffset;
+ WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
+
+ ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = GetBatchFlags(policy);
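+ // GetBatchFlags packs allowInline (0x1), allowInlineSSD (0x2) and
+ // respondAllKeys (0x4) on top of the base flag 0x8.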
+ keyPrev = null;
+ verPrev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ long? ver = versions[offset];
+
+ ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ byte[] digest = key.digest;
+ Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
+ dataOffset += digest.Length;
+
+ if (CanRepeat(key, keyPrev, ver, verPrev))
+ {
+ // Can set the repeat flag to reuse the previous namespace/bin names and save space.
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
+ }
+ else
+ {
+ // Write full message.
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4);
+ dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE;
+ dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ;
+
+ int fieldCount = 0;
+
+ if (ver.HasValue)
+ {
+ fieldCount++;
+ }
+
+ WriteBatchFields(key, fieldCount, 0);
+
+ if (ver.HasValue)
+ {
+ WriteFieldVersion(ver.Value);
+ }
+
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ // Write real field size.
+ ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
+ End(compress);
+ }
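+
+ // Wire-format sketch of the batch rows written above (assuming the client's
+ // 20-byte key digest): each row is a 4-byte record index, the digest, then
+ // either the single BATCH_MSG_REPEAT byte (when CanRepeat allows reusing the
+ // previous row) or a full sub-header plus namespace, set and optional version
+ // fields. For N keys in one namespace/set with no versions, only the first row
+ // pays the full header; the other N - 1 rows cost 4 + 20 + 1 = 25 bytes each.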
+
+ public void SetTxnMarkRollForward(Key key)
+ {
+ Bin bin = new("fwd", true);
+
+ Begin();
+ int fieldCount = EstimateKeySize(key);
+ EstimateOperationSize(bin);
+ //SizeBuffer();
+ WriteTxnMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1);
+ WriteOperation(bin, Operation.Type.WRITE);
+ End();
+ }
+
+ public void SetTxnRoll(Key key, Txn txn, int txnAttr)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(key);
+
+ fieldCount += SizeTxn(key, txn, false);
+
+ SizeBuffer();
+ dataOffset += 8;
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE;
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)(Command.INFO2_WRITE | Command.INFO2_DURABLE_DELETE);
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)txnAttr;
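+ // txnAttr occupies the info4 byte, cf. INFO4_MRT_ROLL_FORWARD / INFO4_MRT_ROLL_BACK.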
+ dataBuffer[dataOffset++] = 0; // clear the result code
+ dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset);
+
+ WriteKey(key);
+ WriteTxn(txn, false);
+ End();
+ }
+
+ public void SetBatchTxnRoll(
+ BatchPolicy policy,
+ Txn txn,
+ Key[] keys,
+ BatchNode batch,
+ BatchAttr attr
+ )
+ {
+ BatchOffsetsNative offsets = new(batch);
+ SetBatchTxnRoll(policy, txn, keys, attr, offsets);
+ }
+
+ public void SetBatchTxnRoll(
+ BatchPolicy policy,
+ Txn txn,
+ Key[] keys,
+ BatchAttr attr,
+ BatchOffsets offsets
+ )
+ {
+ // Estimate buffer size.
+ Begin();
+ int fieldCount = 1;
+ int max = offsets.Size();
+ long?[] versions = new long?[max];
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ versions[i] = txn.GetReadVersion(key);
+ }
+
+ // Batch field
+ dataOffset += FIELD_HEADER_SIZE + 5;
+
+ Key keyPrev = null;
+ long? verPrev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ long? ver = versions[i];
+
+ dataOffset += key.digest.Length + 4;
+
+ if (CanRepeat(key, keyPrev, ver, verPrev))
+ {
+ // Can set the repeat flag to reuse the previous namespace/bin names and save space.
+ dataOffset++;
+ }
+ else
+ {
+ // Write full header and namespace/set/bin names.
+ dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+ SizeTxnBatch(txn, ver, attr.hasWrite);
+ dataOffset += 2; // gen(2) = 2
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ bool compress = SizeBuffer(policy);
+
+ WriteBatchHeader(policy, totalTimeout, fieldCount);
+
+ int fieldSizeOffset = dataOffset;
+ WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
+
+ ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = GetBatchFlags(policy);
+ keyPrev = null;
+ verPrev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ long? ver = versions[i];
+
+ ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ byte[] digest = key.digest;
+ Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
+ dataOffset += digest.Length;
+
+ if (CanRepeat(key, keyPrev, ver, verPrev))
+ {
+ // Can set the repeat flag to reuse the previous namespace/bin names and save space.
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
+ }
+ else
+ {
+ // Write full message.
+ WriteBatchWrite(key, txn, ver, attr, null, 0, 0);
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ // Write real field size.
+ ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
+ End(compress);
+ }
+
+ public void SetTxnClose(Txn txn, Key key)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(key);
+ //SizeBuffer();
+ WriteTxnMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE,
+ fieldCount, 0);
+ End();
+ }
+
+ private void WriteTxnMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount)
+ {
+ SizeBuffer();
+ dataOffset += 8;
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE;
+ dataBuffer[dataOffset++] = (byte)readAttr;
+ dataBuffer[dataOffset++] = (byte)writeAttr;
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = 0;
+ dataBuffer[dataOffset++] = 0;
+ dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset);
+
+ WriteKey(key);
+ }
+
+ //--------------------------------------------------
+ // Writes
+ //--------------------------------------------------
+
+ public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key key, Bin[] bins)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(policy, key, true);
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+
+ foreach (Bin bin in bins)
+ {
+ EstimateOperationSize(bin);
+ }
+
+ bool compress = SizeBuffer(policy);
+
+ WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.Length);
+ WriteKey(policy, key, true);
+
+ policy.filterExp?.Write(this);
+
+ foreach (Bin bin in bins)
+ {
+ WriteOperation(bin, operation);
+ }
+ End(compress);
+ }
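+
+ // Hypothetical usage sketch (illustrative only; assumes a concrete Command
+ // subclass, e.g. a sync write command, that implements SizeBuffer/End/SetLength):
+ //
+ //   Key key = new Key("test", "demo", "k1");
+ //   Bin[] bins = { new Bin("a", 1), new Bin("b", "s") };
+ //   cmd.SetWrite(writePolicy, Operation.Type.WRITE, key, bins);
+ //   // dataBuffer now holds the complete wire message for the put.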
+
+ public virtual void SetDelete(WritePolicy policy, Key key)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(policy, key, true);
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+ SizeBuffer();
+ WriteHeaderWrite(policy, Command.INFO2_WRITE | Command.INFO2_DELETE, fieldCount, 0);
+ WriteKey(policy, key, true);
+
+ policy.filterExp?.Write(this);
+ End();
+ }
+
+ public void SetDelete(Policy policy, Key key, BatchAttr attr)
+ {
+ Begin();
+ Expression exp = GetBatchExpression(policy, attr);
+ int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp);
+ SizeBuffer();
+ WriteKeyAttr(policy, key, attr, exp, fieldCount, 0);
+ End();
+ }
+
+ public virtual void SetTouch(WritePolicy policy, Key key)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(policy, key, true);
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+ EstimateOperationSize();
+ SizeBuffer();
+ WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 1);
+ WriteKey(policy, key, true);
+
+ policy.filterExp?.Write(this);
+ WriteOperation(Operation.Type.TOUCH);
+ End();
+ }
+
+ //--------------------------------------------------
+ // Reads
+ //--------------------------------------------------
+
+ public virtual void SetExists(Policy policy, Key key)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(policy, key, false);
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+ SizeBuffer();
+ WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0);
+ WriteKey(policy, key, false);
+
+ policy.filterExp?.Write(this);
+ End();
+ }
+
+ public virtual void SetRead(Policy policy, Key key, string[] binNames)
+ {
+ int readAttr = Command.INFO1_READ;
+ int opCount = 0;
+
+ if (binNames != null && binNames.Length > 0)
+ {
+ opCount = binNames.Length;
+ }
+ else
+ {
+ readAttr |= Command.INFO1_GET_ALL;
+ }
+
+ Begin();
+ int fieldCount = EstimateKeySize(policy, key, false);
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+
+ if (opCount != 0)
+ {
+ foreach (string binName in binNames)
+ {
+ EstimateOperationSize(binName);
+ }
+ }
+
+ SizeBuffer();
+ WriteHeaderRead(policy, serverTimeout, readAttr, 0, 0, fieldCount, opCount);
+ WriteKey(policy, key, false);
+
+ policy.filterExp?.Write(this);
+
+ if (opCount != 0)
+ {
+ foreach (string binName in binNames)
+ {
+ WriteOperation(binName, Operation.Type.READ);
+ }
+ }
+ End();
+ }
+
+ public void SetRead(Policy policy, BatchRead br)
+ {
+ Begin();
+
+ BatchReadPolicy rp = br.policy;
+ BatchAttr attr = new();
+ Expression exp;
+ int opCount;
+
+ if (rp != null)
+ {
+ attr.SetRead(rp);
+ exp = rp.filterExp ?? policy.filterExp;
+ }
+ else
+ {
+ attr.SetRead(policy);
+ exp = policy.filterExp;
+ }
+
+ if (br.binNames != null)
+ {
+ opCount = br.binNames.Length;
+
+ foreach (string binName in br.binNames)
+ {
+ EstimateOperationSize(binName);
+ }
+ }
+ else if (br.ops != null)
+ {
+ attr.AdjustRead(br.ops);
+ opCount = br.ops.Length;
+
+ foreach (Operation op in br.ops)
+ {
+ if (Operation.IsWrite(op.type))
+ {
+ throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read");
+ }
+ EstimateOperationSize(op);
+ }
+ }
+ else
+ {
+ attr.AdjustRead(br.readAllBins);
+ opCount = 0;
+ }
+
+ int fieldCount = EstimateKeyAttrSize(policy, br.key, attr, exp);
+
+ SizeBuffer();
+ WriteKeyAttr(policy, br.key, attr, exp, fieldCount, opCount);
+
+ if (br.binNames != null)
+ {
+ foreach (string binName in br.binNames)
+ {
+ WriteOperation(binName, Operation.Type.READ);
+ }
+ }
+ else if (br.ops != null)
+ {
+ foreach (Operation op in br.ops)
+ {
+ WriteOperation(op);
+ }
+ }
+ End();
+ }
+
+ public void SetRead(Policy policy, Key key, Operation[] ops)
+ {
+ Begin();
+
+ BatchAttr attr = new();
+ attr.SetRead(policy);
+ attr.AdjustRead(ops);
+
+ int fieldCount = EstimateKeyAttrSize(policy, key, attr, policy.filterExp);
+
+ foreach (Operation op in ops)
+ {
+ if (Operation.IsWrite(op.type))
+ {
+ throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read");
+ }
+ EstimateOperationSize(op);
+ }
+
+ SizeBuffer();
+ WriteKeyAttr(policy, key, attr, policy.filterExp, fieldCount, ops.Length);
+
+ foreach (Operation op in ops)
+ {
+ WriteOperation(op);
+ }
+ End();
+ }
+
+ public virtual void SetReadHeader(Policy policy, Key key)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(policy, key, false);
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+ SizeBuffer();
+ WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0);
+ WriteKey(policy, key, false);
+
+ policy.filterExp?.Write(this);
+ End();
+ }
+
+ //--------------------------------------------------
+ // Operate
+ //--------------------------------------------------
+
+ public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(policy, key, args.hasWrite);
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+ dataOffset += args.size;
+
+ bool compress = SizeBuffer(policy);
+
+ WriteHeaderReadWrite(policy, args, fieldCount);
+ WriteKey(policy, key, args.hasWrite);
+
+ policy.filterExp?.Write(this);
+
+ foreach (Operation operation in args.operations)
+ {
+ WriteOperation(operation);
+ }
+ End(compress);
+ }
+
+ public void SetOperate(Policy policy, BatchAttr attr, Key key, Operation[] ops)
+ {
+ Begin();
+ Expression exp = GetBatchExpression(policy, attr);
+ int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp);
+
+ dataOffset += attr.opSize;
+ bool compress = SizeBuffer(policy);
+ WriteKeyAttr(policy, key, attr, exp, fieldCount, ops.Length);
+
+ foreach (Operation op in ops)
+ {
+ WriteOperation(op);
+ }
+ End(compress);
+ }
+
+ //--------------------------------------------------
+ // UDF
+ //--------------------------------------------------
+
+ public virtual void SetUdf(WritePolicy policy, Key key, string packageName, string functionName, Value[] args)
+ {
+ Begin();
+ int fieldCount = EstimateKeySize(policy, key, true);
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+ byte[] argBytes = Packer.Pack(args);
+ fieldCount += EstimateUdfSize(packageName, functionName, argBytes);
+
+ bool compress = SizeBuffer(policy);
+
+ WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0);
+ WriteKey(policy, key, true);
+
+ policy.filterExp?.Write(this);
+ WriteField(packageName, FieldType.UDF_PACKAGE_NAME);
+ WriteField(functionName, FieldType.UDF_FUNCTION);
+ WriteField(argBytes, FieldType.UDF_ARGLIST);
+ End(compress);
+ }
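+ // The UDF arguments are packed once into argBytes so the same bytes serve both
+ // the size estimate and the UDF_ARGLIST field write (the batch variant below
+ // reuses them the same way).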
+
+ public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, Value[] args)
+ {
+ byte[] argBytes = Packer.Pack(args);
+ SetUdf(policy, attr, key, packageName, functionName, argBytes);
+ }
+
+ public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, byte[] argBytes)
+ {
+ Begin();
+ Expression exp = GetBatchExpression(policy, attr);
+ int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp);
+ fieldCount += EstimateUdfSize(packageName, functionName, argBytes);
+
+ bool compress = SizeBuffer(policy);
+ WriteKeyAttr(policy, key, attr, exp, fieldCount, 0);
+ WriteField(packageName, FieldType.UDF_PACKAGE_NAME);
+ WriteField(functionName, FieldType.UDF_FUNCTION);
+ WriteField(argBytes, FieldType.UDF_ARGLIST);
+ End(compress);
+ }
+
+ //--------------------------------------------------
+ // Batch Read Only
+ //--------------------------------------------------
+
+ public virtual void SetBatchRead(BatchPolicy policy, List<BatchRead> records, BatchNode batch)
+ {
+ // Estimate full row size
+ int[] offsets = batch.offsets;
+ int max = batch.offsetsSize;
+ BatchRead prev = null;
+
+ Begin();
+ int fieldCount = 1;
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+
+ dataOffset += FIELD_HEADER_SIZE + 5;
+
+ for (int i = 0; i < max; i++)
+ {
+ BatchRead record = records[offsets[i]];
+ Key key = record.key;
+ string[] binNames = record.binNames;
+ Operation[] ops = record.ops;
+
+ dataOffset += key.digest.Length + 4;
+
+ // Avoid relatively expensive full equality checks for performance reasons.
+ // Use reference equality only, in the hope that common namespaces/bin names are
+ // set from fixed variables. It's fine if equality is not determined correctly,
+ // because that just results in more space used. The batch will still be correct.
+ if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName &&
+ prev.binNames == binNames && prev.readAllBins == record.readAllBins &&
+ prev.ops == ops)
+ {
+ // Can set the repeat flag to reuse the previous namespace/bin names and save space.
+ dataOffset++;
+ }
+ else
+ {
+ // Estimate full header, namespace and bin names.
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+
+ if (binNames != null)
+ {
+ foreach (string binName in binNames)
+ {
+ EstimateOperationSize(binName);
+ }
+ }
+ else if (ops != null)
+ {
+ foreach (Operation op in ops)
+ {
+ EstimateReadOperationSize(op);
+ }
+ }
+ prev = record;
+ }
+ }
+
+ bool compress = SizeBuffer(policy);
+
+ int readAttr = Command.INFO1_READ;
+
+ if (policy.readModeAP == ReadModeAP.ALL)
+ {
+ readAttr |= Command.INFO1_READ_MODE_AP_ALL;
+ }
+
+ WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0);
+
+ policy.filterExp?.Write(this);
+
+ int fieldSizeOffset = dataOffset;
+ WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
+
+ ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
+ prev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int index = offsets[i];
+ ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ BatchRead record = records[index];
+ Key key = record.key;
+ string[] binNames = record.binNames;
+ Operation[] ops = record.ops;
+ byte[] digest = key.digest;
+ Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
+ dataOffset += digest.Length;
+
+ // Avoid relatively expensive full equality checks for performance reasons.
+ // Use reference equality only, in the hope that common namespaces/bin names are
+ // set from fixed variables. It's fine if equality is not determined correctly,
+ // because that just results in more space used. The batch will still be correct.
+ if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName &&
+ prev.binNames == binNames && prev.readAllBins == record.readAllBins &&
+ prev.ops == ops)
+ {
+ // Can set repeat previous namespace/bin names to save space.
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
+ }
+ else
+ {
+ // Write full header, namespace and bin names.
+ dataBuffer[dataOffset++] = BATCH_MSG_READ;
+
+ if (binNames != null && binNames.Length != 0)
+ {
+ dataBuffer[dataOffset++] = (byte)readAttr;
+ WriteBatchFields(key, 0, binNames.Length);
+
+ foreach (string binName in binNames)
+ {
+ WriteOperation(binName, Operation.Type.READ);
+ }
+ }
+ else if (ops != null)
+ {
+ int offset = dataOffset++;
+ WriteBatchFields(key, 0, ops.Length);
+ dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr);
+ }
+ else
+ {
+ dataBuffer[dataOffset++] = (byte)(readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA));
+ WriteBatchFields(key, 0, 0);
+ }
+ prev = record;
+ }
+ }
+
+ // Write real field size.
+ ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
+ End(compress);
+ }
+
+ public virtual void SetBatchRead
+ (
+ BatchPolicy policy,
+ Key[] keys,
+ BatchNode batch,
+ string[] binNames,
+ Operation[] ops,
+ int readAttr
+ )
+ {
+ // Estimate full row size
+ int[] offsets = batch.offsets;
+ int max = batch.offsetsSize;
+
+ // Estimate dataBuffer size.
+ Begin();
+ int fieldCount = 1;
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+ dataOffset += FIELD_HEADER_SIZE + 5;
+
+ Key prev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ Key key = keys[offsets[i]];
+
+ dataOffset += key.digest.Length + 4;
+
+ // Try reference equality in hope that namespace for all keys is set from a fixed variable.
+ if (prev != null && prev.ns == key.ns && prev.setName == key.setName)
+ {
+ // Can set repeat previous namespace/bin names to save space.
+ dataOffset++;
+ }
+ else
+ {
+ // Estimate full header, namespace and bin names.
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6;
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+
+ if (binNames != null)
+ {
+ foreach (string binName in binNames)
+ {
+ EstimateOperationSize(binName);
+ }
+ }
+ else if (ops != null)
+ {
+ foreach (Operation op in ops)
+ {
+ EstimateReadOperationSize(op);
+ }
+ }
+ prev = key;
+ }
+ }
+
+ bool compress = SizeBuffer(policy);
+
+ if (policy.readModeAP == ReadModeAP.ALL)
+ {
+ readAttr |= Command.INFO1_READ_MODE_AP_ALL;
+ }
+
+ WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0);
+
+ policy.filterExp?.Write(this);
+
+ int fieldSizeOffset = dataOffset;
+ WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
+
+ ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0;
+ prev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int index = offsets[i];
+ ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ Key key = keys[index];
+ byte[] digest = key.digest;
+ Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
+ dataOffset += digest.Length;
+
+ // Try reference equality in hope that namespace for all keys is set from a fixed variable.
+ if (prev != null && prev.ns == key.ns && prev.setName == key.setName)
+ {
+ // Can set repeat previous namespace/bin names to save space.
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
+ }
+ else
+ {
+ // Write full header, namespace and bin names.
+ dataBuffer[dataOffset++] = BATCH_MSG_READ;
+
+ if (binNames != null && binNames.Length != 0)
+ {
+ dataBuffer[dataOffset++] = (byte)readAttr;
+ WriteBatchFields(key, 0, binNames.Length);
+
+ foreach (string binName in binNames)
+ {
+ WriteOperation(binName, Operation.Type.READ);
+ }
+ }
+ else if (ops != null)
+ {
+ int offset = dataOffset++;
+ WriteBatchFields(key, 0, ops.Length);
+ dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr);
+ }
+ else
+ {
+ dataBuffer[dataOffset++] = (byte)readAttr;
+ WriteBatchFields(key, 0, 0);
+ }
+ prev = key;
+ }
+ }
+
+ // Write real field size.
+ ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
+ End(compress);
+ }
+
+ //--------------------------------------------------
+ // Batch Read/Write Operations
+ //--------------------------------------------------
+
+ public virtual void SetBatchOperate(
+ BatchPolicy policy,
+ IList records,
+ BatchNode batch)
+ {
+ BatchOffsetsNative offsets = new(batch);
+ SetBatchOperate(policy, records, offsets);
+ }
+
+ public void SetBatchOperate(
+ BatchPolicy policy,
+ IList records,
+ BatchOffsets offsets)
+ {
+ Begin();
+ int max = offsets.Size();
+ Txn txn = policy.Txn;
+ long?[] versions = null;
+
+ if (txn != null)
+ {
+ versions = new long?[max];
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ BatchRecord record = (BatchRecord)records[offset];
+ versions[i] = txn.GetReadVersion(record.key);
+ }
+ }
+
+ int fieldCount = 1;
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+
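+ // Batch index field: field header + record count(4) + batch flags(1).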
+ dataOffset += FIELD_HEADER_SIZE + 5;
+
+ BatchRecord prev = null;
+ long? verPrev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ BatchRecord record = (BatchRecord)records[offset];
+ Key key = record.key;
+ long? ver = versions?[i];
+
+ dataOffset += key.digest.Length + 4;
+
+ if (CanRepeat(policy, key, record, prev, ver, verPrev))
+ {
+ // Can set repeat previous namespace/bin names to save space.
+ dataOffset++;
+ }
+ else
+ {
+ // Estimate full header, namespace and bin names.
+ dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+ SizeTxnBatch(txn, ver, record.hasWrite);
+ dataOffset += record.Size(policy);
+ prev = record;
+ verPrev = ver;
+ }
+ }
+ bool compress = SizeBuffer(policy);
+
+ WriteBatchHeader(policy, totalTimeout, fieldCount);
+
+ policy.filterExp?.Write(this);
+
+ int fieldSizeOffset = dataOffset;
+ WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
+
+ ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = GetBatchFlags(policy);
+
+ BatchAttr attr = new();
+ prev = null;
+ verPrev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ BatchRecord record = (BatchRecord)records[offset];
+ long? ver = versions?[i];
+ ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ Key key = record.key;
+ byte[] digest = key.digest;
+ Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
+ dataOffset += digest.Length;
+
+ if (CanRepeat(policy, key, record, prev, ver, verPrev))
+ {
+ // Can set repeat previous namespace/bin names to save space.
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
+ }
+ else
+ {
+ // Write full message.
+ switch (record.GetBatchType())
+ {
+ case BatchRecord.Type.BATCH_READ:
+ {
+ BatchRead br = (BatchRead)record;
+
+ if (br.policy != null)
+ {
+ attr.SetRead(br.policy);
+ }
+ else
+ {
+ attr.SetRead(policy);
+ }
+
+ if (br.binNames != null)
+ {
+ if (br.binNames.Length > 0)
+ {
+ WriteBatchBinNames(key, txn, ver, br.binNames, attr, attr.filterExp);
+ }
+ else
+ {
+ attr.AdjustRead(true);
+ WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0);
+ }
+ }
+ else if (br.ops != null)
+ {
+ attr.AdjustRead(br.ops);
+ WriteBatchOperations(key, txn, ver, br.ops, attr, attr.filterExp);
+ }
+ else
+ {
+ attr.AdjustRead(br.readAllBins);
+ WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0);
+ }
+ break;
+ }
+
+ case BatchRecord.Type.BATCH_WRITE:
+ {
+ BatchWrite bw = (BatchWrite)record;
+
+ if (bw.policy != null)
+ {
+ attr.SetWrite(bw.policy);
+ }
+ else
+ {
+ attr.SetWrite(policy);
+ }
+ attr.AdjustWrite(bw.ops);
+ WriteBatchOperations(key, txn, ver, bw.ops, attr, attr.filterExp);
+ break;
+ }
+
+ case BatchRecord.Type.BATCH_UDF:
+ {
+ BatchUDF bu = (BatchUDF)record;
+
+ if (bu.policy != null)
+ {
+ attr.SetUDF(bu.policy);
+ }
+ else
+ {
+ attr.SetUDF(policy);
+ }
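+ // Three UDF fields follow: package name, function name and argument list.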
+ WriteBatchWrite(key, txn, ver, attr, attr.filterExp, 3, 0);
+ WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME);
+ WriteField(bu.functionName, FieldType.UDF_FUNCTION);
+ WriteField(bu.argBytes, FieldType.UDF_ARGLIST);
+ break;
+ }
+
+ case BatchRecord.Type.BATCH_DELETE:
+ {
+ BatchDelete bd = (BatchDelete)record;
+
+ if (bd.policy != null)
+ {
+ attr.SetDelete(bd.policy);
+ }
+ else
+ {
+ attr.SetDelete(policy);
+ }
+ WriteBatchWrite(key, txn, ver, attr, attr.filterExp, 0, 0);
+ break;
+ }
+ }
+ prev = record;
+ verPrev = ver;
+ }
+ }
+
+ // Write real field size.
+ ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
+ End(compress);
+ }
+
+ public virtual void SetBatchOperate
+ (
+ BatchPolicy policy,
+ Key[] keys,
+ BatchNode batch,
+ string[] binNames,
+ Operation[] ops,
+ BatchAttr attr
+ )
+ {
+ BatchOffsetsNative offsets = new(batch);
+ SetBatchOperate(policy, keys, binNames, ops, attr, offsets);
+ }
+
+ public void SetBatchOperate(
+ BatchPolicy policy,
+ Key[] keys,
+ string[] binNames,
+ Operation[] ops,
+ BatchAttr attr,
+ BatchOffsets offsets
+ )
+ {
+ // Estimate full row size
+ int max = offsets.Size();
+ Txn txn = policy.Txn;
+ long?[] versions = null;
+
+ Begin();
+
+ if (txn != null)
+ {
+ versions = new long?[max];
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ versions[i] = txn.GetReadVersion(key);
+ }
+ }
+
+ Expression exp = GetBatchExpression(policy, attr);
+ int fieldCount = 1;
+
+ if (exp != null)
+ {
+ dataOffset += exp.Size();
+ fieldCount++;
+ }
+
+ dataOffset += FIELD_HEADER_SIZE + 5;
+
+ Key keyPrev = null;
+ long? verPrev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ long? ver = versions?[i];
+
+ dataOffset += key.digest.Length + 4;
+
+ if (CanRepeat(attr, key, keyPrev, ver, verPrev))
+ {
+ // Can set repeat previous namespace/bin names to save space.
+ dataOffset++;
+ }
+ else
+ {
+ // Estimate full header and namespace/set/bin names.
+ dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+ SizeTxnBatch(txn, ver, attr.hasWrite);
+
+ if (attr.sendKey)
+ {
+ dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1;
+ }
+
+ if (binNames != null)
+ {
+ foreach (string binName in binNames)
+ {
+ EstimateOperationSize(binName);
+ }
+ }
+ else if (ops != null)
+ {
+ foreach (Operation op in ops)
+ {
+ if (Operation.IsWrite(op.type))
+ {
+ if (!attr.hasWrite)
+ {
+ throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read");
+ }
+ dataOffset += 2; // Extra write specific fields.
+ }
+ EstimateOperationSize(op);
+ }
+ }
+ else if ((attr.writeAttr & Command.INFO2_DELETE) != 0)
+ {
+ dataOffset += 2; // Extra write specific fields.
+ }
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ bool compress = SizeBuffer(policy);
+
+ WriteBatchHeader(policy, totalTimeout, fieldCount);
+
+ exp?.Write(this);
+
+ int fieldSizeOffset = dataOffset;
+ WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
+
+ ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = GetBatchFlags(policy);
+ keyPrev = null;
+ verPrev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ long? ver = versions?[i];
+
+ ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ byte[] digest = key.digest;
+ Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
+ dataOffset += digest.Length;
+
+ if (CanRepeat(attr, key, keyPrev, ver, verPrev))
+ {
+ // Can set repeat previous namespace/bin names to save space.
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
+ }
+ else
+ {
+ // Write full message.
+ if (binNames != null)
+ {
+ WriteBatchBinNames(key, txn, ver, binNames, attr, null);
+ }
+ else if (ops != null)
+ {
+ WriteBatchOperations(key, txn, ver, ops, attr, null);
+ }
+ else if ((attr.writeAttr & Command.INFO2_DELETE) != 0)
+ {
+ WriteBatchWrite(key, txn, ver, attr, null, 0, 0);
+ }
+ else
+ {
+ WriteBatchRead(key, txn, ver, attr, null, 0);
+ }
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ // Write real field size.
+ ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
+ End(compress);
+ }
+
+ public virtual void SetBatchUDF(
+ BatchPolicy policy,
+ Key[] keys,
+ BatchNode batch,
+ string packageName,
+ string functionName,
+ byte[] argBytes,
+ BatchAttr attr
+ )
+ {
+ BatchOffsetsNative offsets = new(batch);
+ SetBatchUDF(policy, keys, packageName, functionName, argBytes, attr, offsets);
+ }
+
+ public virtual void SetBatchUDF
+ (
+ BatchPolicy policy,
+ Key[] keys,
+ string packageName,
+ string functionName,
+ byte[] argBytes,
+ BatchAttr attr,
+ BatchOffsets offsets
+ )
+ {
+ // Estimate buffer size.
+ Begin();
+ int max = offsets.Size();
+ Txn txn = policy.Txn;
+ long?[] versions = null;
+
+ if (txn != null)
+ {
+ versions = new long?[max];
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ versions[i] = txn.GetReadVersion(key);
+ }
+ }
+
+ Expression exp = GetBatchExpression(policy, attr);
+ int fieldCount = 1;
+
+ if (exp != null)
+ {
+ dataOffset += exp.Size();
+ fieldCount++;
+ }
+
+ dataOffset += FIELD_HEADER_SIZE + 5;
+
+ Key keyPrev = null;
+ long? verPrev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ long? ver = versions?[i];
+
+ dataOffset += key.digest.Length + 4;
+
+ if (CanRepeat(attr, key, keyPrev, ver, verPrev))
+ {
+ // Can set repeat previous namespace/bin names to save space.
+ dataOffset++;
+ }
+ else
+ {
+ // Estimate full header and namespace/set/bin names.
+ dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+ SizeTxnBatch(txn, ver, attr.hasWrite);
+
+ if (attr.sendKey)
+ {
+ dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1;
+ }
+ dataOffset += 2; // generation(2)
+ EstimateUdfSize(packageName, functionName, argBytes);
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ bool compress = SizeBuffer(policy);
+
+ WriteBatchHeader(policy, totalTimeout, fieldCount);
+
+ exp?.Write(this);
+
+ int fieldSizeOffset = dataOffset;
+ WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
+
+ ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = GetBatchFlags(policy);
+ keyPrev = null;
+ verPrev = null;
+
+ for (int i = 0; i < max; i++)
+ {
+ int offset = offsets.Get(i);
+ Key key = keys[offset];
+ long? ver = versions?[i];
+
+ ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ byte[] digest = key.digest;
+ Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length);
+ dataOffset += digest.Length;
+
+ if (CanRepeat(attr, key, keyPrev, ver, verPrev))
+ {
+ // Can set repeat previous namespace/bin names to save space.
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
+ }
+ else
+ {
+ // Write full message.
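+ // Three UDF fields follow: package name, function name and argument list.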
+ WriteBatchWrite(key, txn, ver, attr, null, 3, 0);
+ WriteField(packageName, FieldType.UDF_PACKAGE_NAME);
+ WriteField(functionName, FieldType.UDF_FUNCTION);
+ WriteField(argBytes, FieldType.UDF_ARGLIST);
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ // Write real field size.
+ ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset);
+ End(compress);
+ }
+
+ private static bool CanRepeat(
+ Policy policy,
+ Key key,
+ BatchRecord record,
+ BatchRecord prev,
+ long? ver,
+ long? verPrev
+ )
+ {
+ // Avoid relatively expensive full equality checks for performance reasons.
+ // Use reference equality only, in the hope that common namespaces/bin names
+ // are set from fixed variables. It's fine if equality is not determined
+ // correctly because that only results in more space used. The batch will
+ // still be correct. The same applies to the ver reference equality check.
+ return !policy.sendKey && verPrev == ver && prev != null && prev.key.ns == key.ns &&
+ prev.key.setName == key.setName && record.Equals(prev);
+ }
+
+ private static bool CanRepeat(BatchAttr attr, Key key, Key keyPrev, long? ver, long? verPrev)
+ {
+ return !attr.sendKey && verPrev == ver && keyPrev != null && keyPrev.ns == key.ns &&
+ keyPrev.setName == key.setName;
+ }
+
+ private static bool CanRepeat(Key key, Key keyPrev, long? ver, long? verPrev)
+ {
+ return verPrev == ver && keyPrev != null && keyPrev.ns == key.ns &&
+ keyPrev.setName == key.setName;
+ }
+
+ private static Expression GetBatchExpression(Policy policy, BatchAttr attr)
+ {
+ return attr.filterExp ?? policy.filterExp;
+ }
+
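+ // Batch flags byte: 0x1 = allowInline, 0x2 = allowInlineSSD, 0x4 = respondAllKeys.
+ // 0x8 is always set by this client.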
+ private static byte GetBatchFlags(BatchPolicy policy)
+ {
+ byte flags = 0x8;
+
+ if (policy.allowInline)
+ {
+ flags |= 0x1;
+ }
+
+ if (policy.allowInlineSSD)
+ {
+ flags |= 0x2;
+ }
+
+ if (policy.respondAllKeys)
+ {
+ flags |= 0x4;
+ }
+ return flags;
+ }
+
+ private void SizeTxnBatch(Txn txn, long? ver, bool hasWrite)
+ {
+ if (txn != null)
+ {
+ dataOffset++; // Add info4 byte for MRT.
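+ // Size MRT id(8), optional read version(7) and optional deadline(4) fields
+ // to match WriteBatchFieldsTxn().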
+ dataOffset += 8 + FIELD_HEADER_SIZE;
+
+ if (ver.HasValue)
+ {
+ dataOffset += 7 + FIELD_HEADER_SIZE;
+ }
+
+ if (hasWrite && txn.Deadline != 0)
+ {
+ dataOffset += 4 + FIELD_HEADER_SIZE;
+ }
+ }
+ }
+
+ private void WriteBatchHeader(Policy policy, int timeout, int fieldCount)
+ {
+ int readAttr = Command.INFO1_BATCH;
+
+ if (policy.compress)
+ {
+ readAttr |= Command.INFO1_COMPRESS_RESPONSE;
+ }
+
+ // Write all header data except total size which must be written last.
+ dataOffset += 8;
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
+ dataBuffer[dataOffset++] = (byte)readAttr;
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)0;
+
+ Array.Clear(dataBuffer, dataOffset, 10); // Zero unused(1), resultCode(1), generation(4) and expiration(4).
+ dataOffset += 10;
+
+ dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset);
+ }
+
+ private void WriteBatchBinNames(Key key, Txn txn, long? ver, string[] binNames, BatchAttr attr, Expression filter)
+ {
+ WriteBatchRead(key, txn, ver, attr, filter, binNames.Length);
+
+ foreach (string binName in binNames)
+ {
+ WriteOperation(binName, Operation.Type.READ);
+ }
+ }
+
+ private void WriteBatchOperations(Key key, Txn txn, long? ver, Operation[] ops, BatchAttr attr, Expression filter)
+ {
+ if (attr.hasWrite)
+ {
+ WriteBatchWrite(key, txn, ver, attr, filter, 0, ops.Length);
+ }
+ else
+ {
+ WriteBatchRead(key, txn, ver, attr, filter, ops.Length);
+ }
+
+ foreach (Operation op in ops)
+ {
+ WriteOperation(op);
+ }
+ }
+
+ private void WriteBatchRead(Key key, Txn txn, long? ver, BatchAttr attr, Expression filter, int opCount)
+ {
+ if (txn != null)
+ {
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL);
+ dataBuffer[dataOffset++] = (byte)attr.readAttr;
+ dataBuffer[dataOffset++] = (byte)attr.writeAttr;
+ dataBuffer[dataOffset++] = (byte)attr.infoAttr;
+ dataBuffer[dataOffset++] = (byte)attr.txnAttr;
+ ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
+ dataOffset += 4;
+ WriteBatchFieldsTxn(key, txn, ver, attr, filter, 0, opCount);
+ }
+ else
+ {
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL);
+ dataBuffer[dataOffset++] = (byte)attr.readAttr;
+ dataBuffer[dataOffset++] = (byte)attr.writeAttr;
+ dataBuffer[dataOffset++] = (byte)attr.infoAttr;
+ ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
+ dataOffset += 4;
+ WriteBatchFieldsReg(key, attr, filter, 0, opCount);
+ }
+ }
+
+ private void WriteBatchWrite(Key key, Txn txn, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount)
+ {
+ if (txn != null)
+ {
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL);
+ dataBuffer[dataOffset++] = (byte)attr.readAttr;
+ dataBuffer[dataOffset++] = (byte)attr.writeAttr;
+ dataBuffer[dataOffset++] = (byte)attr.infoAttr;
+ dataBuffer[dataOffset++] = (byte)attr.txnAttr;
+ ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset);
+ dataOffset += 2;
+ ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
+ dataOffset += 4;
+ WriteBatchFieldsTxn(key, txn, ver, attr, filter, fieldCount, opCount);
+ }
+ else
+ {
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL);
+ dataBuffer[dataOffset++] = (byte)attr.readAttr;
+ dataBuffer[dataOffset++] = (byte)attr.writeAttr;
+ dataBuffer[dataOffset++] = (byte)attr.infoAttr;
+ ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset);
+ dataOffset += 2;
+ ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
+ dataOffset += 4;
+ WriteBatchFieldsReg(key, attr, filter, fieldCount, opCount);
+ }
+ }
+
+ private void WriteBatchFieldsTxn(Key key, Txn txn, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount)
+ {
+ fieldCount++;
+
+ if (ver.HasValue)
+ {
+ fieldCount++;
+ }
+
+ if (attr.hasWrite && txn.Deadline != 0)
+ {
+ fieldCount++;
+ }
+
+ if (filter != null)
+ {
+ fieldCount++;
+ }
+
+ if (attr.sendKey)
+ {
+ fieldCount++;
+ }
+
+ WriteBatchFields(key, fieldCount, opCount);
+
+ WriteFieldLE(txn.Id, FieldType.MRT_ID);
+
+ if (ver.HasValue)
+ {
+ WriteFieldVersion(ver.Value);
+ }
+
+ if (attr.hasWrite && txn.Deadline != 0)
+ {
+ WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE);
+ }
+
+ filter?.Write(this);
+
+ if (attr.sendKey)
+ {
+ WriteField(key.userKey, FieldType.KEY);
+ }
+ }
+
+ private void WriteBatchFieldsReg(
+ Key key,
+ BatchAttr attr,
+ Expression filter,
+ int fieldCount,
+ int opCount
+ )
+ {
+ if (filter != null)
+ {
+ fieldCount++;
+ }
+
+ if (attr.sendKey)
+ {
+ fieldCount++;
+ }
+
+ WriteBatchFields(key, fieldCount, opCount);
+
+ filter?.Write(this);
+
+ if (attr.sendKey)
+ {
+ WriteField(key.userKey, FieldType.KEY);
+ }
+ }
+
+ private void WriteBatchFields(Key key, int fieldCount, int opCount)
+ {
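+ // Namespace and set name fields are always written, so account for them here.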
+ fieldCount += 2;
+ ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += 2;
+ ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset);
+ dataOffset += 2;
+ WriteField(key.ns, FieldType.NAMESPACE);
+ WriteField(key.setName, FieldType.TABLE);
+ }
+
+ //--------------------------------------------------
+ // Scan
+ //--------------------------------------------------
+
+ public virtual void SetScan
+ (
+ Cluster cluster,
+ ScanPolicy policy,
+ string ns,
+ string setName,
+ string[] binNames,
+ ulong taskId,
+ NodePartitions nodePartitions
+ )
+ {
+ Begin();
+ int fieldCount = 0;
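+ // Full partitions are sent as 2-byte ids; partial partitions as 20-byte digests.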
+ int partsFullSize = nodePartitions.partsFull.Count * 2;
+ int partsPartialSize = nodePartitions.partsPartial.Count * 20;
+ long maxRecords = nodePartitions.recordMax;
+
+ if (ns != null)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(ns) + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (setName != null)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(setName) + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (partsFullSize > 0)
+ {
+ dataOffset += partsFullSize + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (partsPartialSize > 0)
+ {
+ dataOffset += partsPartialSize + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (maxRecords > 0)
+ {
+ dataOffset += 8 + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (policy.recordsPerSecond > 0)
+ {
+ dataOffset += 4 + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+
+ // Estimate scan timeout size.
+ dataOffset += 4 + FIELD_HEADER_SIZE;
+ fieldCount++;
+
+ // Estimate taskId size.
+ dataOffset += 8 + FIELD_HEADER_SIZE;
+ fieldCount++;
+
+ if (binNames != null)
+ {
+ foreach (string binName in binNames)
+ {
+ EstimateOperationSize(binName);
+ }
+ }
+
+ SizeBuffer();
+ int readAttr = Command.INFO1_READ;
+
+ if (!policy.includeBinData)
+ {
+ readAttr |= Command.INFO1_NOBINDATA;
+ }
+
+ // Clusters that support partition queries also support not sending partition done messages.
+ int operationCount = (binNames == null) ? 0 : binNames.Length;
+ WriteHeaderRead(policy, totalTimeout, readAttr, 0, Command.INFO3_PARTITION_DONE, fieldCount, operationCount);
+
+ if (ns != null)
+ {
+ WriteField(ns, FieldType.NAMESPACE);
+ }
+
+ if (setName != null)
+ {
+ WriteField(setName, FieldType.TABLE);
+ }
+
+ if (partsFullSize > 0)
+ {
+ WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY);
+
+ foreach (PartitionStatus part in nodePartitions.partsFull)
+ {
+ ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset);
+ dataOffset += 2;
+ }
+ }
+
+ if (partsPartialSize > 0)
+ {
+ WriteFieldHeader(partsPartialSize, FieldType.DIGEST_ARRAY);
+
+ foreach (PartitionStatus part in nodePartitions.partsPartial)
+ {
+ Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20);
+ dataOffset += 20;
+ }
+ }
+
+ if (maxRecords > 0)
+ {
+ WriteField((ulong)maxRecords, FieldType.MAX_RECORDS);
+ }
+
+ if (policy.recordsPerSecond > 0)
+ {
+ WriteField(policy.recordsPerSecond, FieldType.RECORDS_PER_SECOND);
+ }
+
+ policy.filterExp?.Write(this);
+
+ // Write scan timeout
+ WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT);
+
+ // Write taskId field
+ WriteField(taskId, FieldType.QUERY_ID);
+
+ if (binNames != null)
+ {
+ foreach (string binName in binNames)
+ {
+ WriteOperation(binName, Operation.Type.READ);
+ }
+ }
+ End();
+ }
+
+ //--------------------------------------------------
+ // Query
+ //--------------------------------------------------
+
+ protected virtual internal void SetQuery
+ (
+ Cluster cluster,
+ Policy policy,
+ Statement statement,
+ ulong taskId,
+ bool background,
+ NodePartitions nodePartitions
+ )
+ {
+ byte[] functionArgBuffer = null;
+ int fieldCount = 0;
+ int filterSize = 0;
+ int binNameSize = 0;
+ bool isNew = cluster.hasPartitionQuery;
+
+ Begin();
+
+ if (statement.ns != null)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(statement.ns) + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (statement.setName != null)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(statement.setName) + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ // Estimate recordsPerSecond field size. This field is used in new servers and not used
+ // (but harmless to add) in old servers.
+ if (statement.recordsPerSecond > 0)
+ {
+ dataOffset += 4 + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ // Estimate socket timeout field size. This field is used in new servers and not used
+ // (but harmless to add) in old servers.
+ dataOffset += 4 + FIELD_HEADER_SIZE;
+ fieldCount++;
+
+ // Estimate taskId field.
+ dataOffset += 8 + FIELD_HEADER_SIZE;
+ fieldCount++;
+
+ byte[] packedCtx = null;
+
+ if (statement.filter != null)
+ {
+ IndexCollectionType type = statement.filter.CollectionType;
+
+ // Estimate INDEX_TYPE field.
+ if (type != IndexCollectionType.DEFAULT)
+ {
+ dataOffset += FIELD_HEADER_SIZE + 1;
+ fieldCount++;
+ }
+
+ // Estimate INDEX_RANGE field.
+ dataOffset += FIELD_HEADER_SIZE;
+ filterSize++; // num filters
+ filterSize += statement.filter.EstimateSize();
+ dataOffset += filterSize;
+ fieldCount++;
+
+ if (!isNew)
+ {
+ // In old servers, query bin names are specified as a field (scan bin names
+ // are specified later as operations). Estimate size for selected bin names.
+ if (statement.binNames != null && statement.binNames.Length > 0)
+ {
+ dataOffset += FIELD_HEADER_SIZE;
+ binNameSize++; // num bin names
+
+ foreach (string binName in statement.binNames)
+ {
+ binNameSize += ByteUtil.EstimateSizeUtf8(binName) + 1;
+ }
+ dataOffset += binNameSize;
+ fieldCount++;
+ }
+ }
+
+ packedCtx = statement.filter.PackedCtx;
+
+ if (packedCtx != null)
+ {
+ dataOffset += FIELD_HEADER_SIZE + packedCtx.Length;
+ fieldCount++;
+ }
+ }
+
+ // Estimate aggregation/background function size.
+ if (statement.functionName != null)
+ {
+ dataOffset += FIELD_HEADER_SIZE + 1; // udf type
+ dataOffset += ByteUtil.EstimateSizeUtf8(statement.packageName) + FIELD_HEADER_SIZE;
+ dataOffset += ByteUtil.EstimateSizeUtf8(statement.functionName) + FIELD_HEADER_SIZE;
+
+ if (statement.functionArgs.Length > 0)
+ {
+ functionArgBuffer = Packer.Pack(statement.functionArgs);
+ }
+ else
+ {
+ functionArgBuffer = Array.Empty<byte>();
+ }
+ dataOffset += FIELD_HEADER_SIZE + functionArgBuffer.Length;
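+ // UDF op type, package name, function name and argument list fields.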
+ fieldCount += 4;
+ }
+
+ if (policy.filterExp != null)
+ {
+ dataOffset += policy.filterExp.Size();
+ fieldCount++;
+ }
+
+ long maxRecords = 0;
+ int partsFullSize = 0;
+ int partsPartialDigestSize = 0;
+ int partsPartialBValSize = 0;
+
+ if (nodePartitions != null)
+ {
+ partsFullSize = nodePartitions.partsFull.Count * 2;
+ partsPartialDigestSize = nodePartitions.partsPartial.Count * 20;
+
+ if (statement.filter != null)
+ {
+ partsPartialBValSize = nodePartitions.partsPartial.Count * 8;
+ }
+ maxRecords = nodePartitions.recordMax;
+ }
+
+ if (partsFullSize > 0)
+ {
+ dataOffset += partsFullSize + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (partsPartialDigestSize > 0)
+ {
+ dataOffset += partsPartialDigestSize + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (partsPartialBValSize > 0)
+ {
+ dataOffset += partsPartialBValSize + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ // Estimate max records field size. This field is used in new servers and not used
+ // (but harmless to add) in old servers.
+ if (maxRecords > 0)
+ {
+ dataOffset += 8 + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ // Operations (used in query execute) and bin names (used in scan/query) are mutually exclusive.
+ int operationCount = 0;
+
+ if (statement.operations != null)
+ {
+ // Estimate size for background operations.
+ if (!background)
+ {
+ throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Operations not allowed in foreground query");
+ }
+
+ foreach (Operation operation in statement.operations)
+ {
+ if (!Operation.IsWrite(operation.type))
+ {
+ throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Read operations not allowed in background query");
+ }
+ EstimateOperationSize(operation);
+ }
+ operationCount = statement.operations.Length;
+ }
+ else if (statement.binNames != null && (isNew || statement.filter == null))
+ {
+ // Estimate size for selected bin names (query bin names already handled for old servers).
+ foreach (string binName in statement.binNames)
+ {
+ EstimateOperationSize(binName);
+ }
+ operationCount = statement.binNames.Length;
+ }
+
+ SizeBuffer();
+
+ if (background)
+ {
+ WriteHeaderWrite((WritePolicy)policy, Command.INFO2_WRITE, fieldCount, operationCount);
+ }
+ else
+ {
+ QueryPolicy qp = (QueryPolicy)policy;
+ int readAttr = Command.INFO1_READ;
+ int writeAttr = 0;
+
+ if (!qp.includeBinData)
+ {
+ readAttr |= Command.INFO1_NOBINDATA;
+ }
+
+ if (qp.shortQuery || qp.expectedDuration == QueryDuration.SHORT)
+ {
+ readAttr |= Command.INFO1_SHORT_QUERY;
+ }
+ else if (qp.expectedDuration == QueryDuration.LONG_RELAX_AP)
+ {
+ writeAttr |= Command.INFO2_RELAX_AP_LONG_QUERY;
+ }
+
+ int infoAttr = (isNew || statement.filter == null) ? Command.INFO3_PARTITION_DONE : 0;
+
+ WriteHeaderRead(policy, totalTimeout, readAttr, writeAttr, infoAttr, fieldCount, operationCount);
+ }
+
+ if (statement.ns != null)
+ {
+ WriteField(statement.ns, FieldType.NAMESPACE);
+ }
+
+ if (statement.setName != null)
+ {
+ WriteField(statement.setName, FieldType.TABLE);
+ }
+
+ // Write records per second.
+ if (statement.recordsPerSecond > 0)
+ {
+ WriteField(statement.recordsPerSecond, FieldType.RECORDS_PER_SECOND);
+ }
+
+ // Write socket idle timeout.
+ WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT);
+
+ // Write taskId field
+ WriteField(taskId, FieldType.QUERY_ID);
+
+ if (statement.filter != null)
+ {
+ IndexCollectionType type = statement.filter.CollectionType;
+
+ if (type != IndexCollectionType.DEFAULT)
+ {
+ WriteFieldHeader(1, FieldType.INDEX_TYPE);
+ dataBuffer[dataOffset++] = (byte)type;
+ }
+
+ WriteFieldHeader(filterSize, FieldType.INDEX_RANGE);
+ dataBuffer[dataOffset++] = (byte)1;
+ dataOffset = statement.filter.Write(dataBuffer, dataOffset);
+
+ if (!isNew)
+ {
+ // In old servers, query bin names are specified as a field (scan bin names
+ // are specified later as operations).
+ if (statement.binNames != null && statement.binNames.Length > 0)
+ {
+ WriteFieldHeader(binNameSize, FieldType.QUERY_BINLIST);
+ dataBuffer[dataOffset++] = (byte)statement.binNames.Length;
+
+ foreach (string binName in statement.binNames)
+ {
+ int len = ByteUtil.StringToUtf8(binName, dataBuffer, dataOffset + 1);
+ dataBuffer[dataOffset] = (byte)len;
+ dataOffset += len + 1;
+ }
+ }
+ }
+
+ if (packedCtx != null)
+ {
+ WriteFieldHeader(packedCtx.Length, FieldType.INDEX_CONTEXT);
+ Array.Copy(packedCtx, 0, dataBuffer, dataOffset, packedCtx.Length);
+ dataOffset += packedCtx.Length;
+ }
+ }
+
+ if (statement.functionName != null)
+ {
+ WriteFieldHeader(1, FieldType.UDF_OP);
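+ // UDF op type: 2 = background execute, 1 = foreground aggregation.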
+ dataBuffer[dataOffset++] = background ? (byte)2 : (byte)1;
+ WriteField(statement.packageName, FieldType.UDF_PACKAGE_NAME);
+ WriteField(statement.functionName, FieldType.UDF_FUNCTION);
+ WriteField(functionArgBuffer, FieldType.UDF_ARGLIST);
+ }
+
+ policy.filterExp?.Write(this);
+
+ if (partsFullSize > 0)
+ {
+ WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY);
+
+ foreach (PartitionStatus part in nodePartitions.partsFull)
+ {
+ ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset);
+ dataOffset += 2;
+ }
+ }
+
+ if (partsPartialDigestSize > 0)
+ {
+ WriteFieldHeader(partsPartialDigestSize, FieldType.DIGEST_ARRAY);
+
+ foreach (PartitionStatus part in nodePartitions.partsPartial)
+ {
+ Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20);
+ dataOffset += 20;
+ }
+ }
+
+ if (partsPartialBValSize > 0)
+ {
+ WriteFieldHeader(partsPartialBValSize, FieldType.BVAL_ARRAY);
+
+ foreach (PartitionStatus part in nodePartitions.partsPartial)
+ {
+ ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset);
+ dataOffset += 8;
+ }
+ }
+
+ if (maxRecords > 0)
+ {
+ WriteField((ulong)maxRecords, FieldType.MAX_RECORDS);
+ }
+
+ if (statement.operations != null)
+ {
+ foreach (Operation operation in statement.operations)
+ {
+ WriteOperation(operation);
+ }
+ }
+ else if (statement.binNames != null && (isNew || statement.filter == null))
+ {
+ foreach (string binName in statement.binNames)
+ {
+ WriteOperation(binName, Operation.Type.READ);
+ }
+ }
+ End();
+ }
+
+ //--------------------------------------------------
+ // Command Sizing
+ //--------------------------------------------------
+
+ private int EstimateKeyAttrSize(Policy policy, Key key, BatchAttr attr, Expression filterExp)
+ {
+ int fieldCount = EstimateKeySize(policy, key, attr.hasWrite);
+
+ if (filterExp != null)
+ {
+ dataOffset += filterExp.Size();
+ fieldCount++;
+ }
+ return fieldCount;
+ }
+
+ private int EstimateKeySize(Policy policy, Key key, bool hasWrite)
+ {
+ int fieldCount = EstimateKeySize(key);
+
+ fieldCount += SizeTxn(key, policy.Txn, hasWrite);
+
+ if (policy.sendKey)
+ {
+ dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1;
+ fieldCount++;
+ }
+ return fieldCount;
+ }
+
+ private int EstimateKeySize(Key key)
+ {
+ int fieldCount = 0;
+
+ if (key.ns != null)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (key.setName != null)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ dataOffset += key.digest.Length + FIELD_HEADER_SIZE;
+ fieldCount++;
+
+ return fieldCount;
+ }
+
+ private int EstimateUdfSize(string packageName, string functionName, byte[] bytes)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(packageName) + FIELD_HEADER_SIZE;
+ dataOffset += ByteUtil.EstimateSizeUtf8(functionName) + FIELD_HEADER_SIZE;
+ dataOffset += bytes.Length + FIELD_HEADER_SIZE;
+ return 3;
+ }
+
+ private void EstimateOperationSize(Bin bin)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(bin.name) + OPERATION_HEADER_SIZE;
+ dataOffset += bin.value.EstimateSize();
+ }
+
+ private void EstimateOperationSize(Operation operation)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE;
+ dataOffset += operation.value.EstimateSize();
+ }
+
+ private void EstimateReadOperationSize(Operation operation)
+ {
+ if (Operation.IsWrite(operation.type))
+ {
+ throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read");
+ }
+ dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE;
+ dataOffset += operation.value.EstimateSize();
+ }
+
+ private void EstimateOperationSize(string binName)
+ {
+ dataOffset += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE;
+ }
+
+ private void EstimateOperationSize()
+ {
+ dataOffset += OPERATION_HEADER_SIZE;
+ }
+
+ //--------------------------------------------------
+ // Command Writes
+ //--------------------------------------------------
+
+ /// <summary>
+ /// Header write for write commands.
+ /// </summary>
+ private void WriteHeaderWrite(WritePolicy policy, int writeAttr, int fieldCount, int operationCount)
+ {
+ // Set flags.
+ int generation = 0;
+ int infoAttr = 0;
+
+ switch (policy.recordExistsAction)
+ {
+ case RecordExistsAction.UPDATE:
+ break;
+ case RecordExistsAction.UPDATE_ONLY:
+ infoAttr |= Command.INFO3_UPDATE_ONLY;
+ break;
+ case RecordExistsAction.REPLACE:
+ infoAttr |= Command.INFO3_CREATE_OR_REPLACE;
+ break;
+ case RecordExistsAction.REPLACE_ONLY:
+ infoAttr |= Command.INFO3_REPLACE_ONLY;
+ break;
+ case RecordExistsAction.CREATE_ONLY:
+ writeAttr |= Command.INFO2_CREATE_ONLY;
+ break;
+ }
+
+ switch (policy.generationPolicy)
+ {
+ case GenerationPolicy.NONE:
+ break;
+ case GenerationPolicy.EXPECT_GEN_EQUAL:
+ generation = policy.generation;
+ writeAttr |= Command.INFO2_GENERATION;
+ break;
+ case GenerationPolicy.EXPECT_GEN_GT:
+ generation = policy.generation;
+ writeAttr |= Command.INFO2_GENERATION_GT;
+ break;
+ }
+
+ if (policy.commitLevel == CommitLevel.COMMIT_MASTER)
+ {
+ infoAttr |= Command.INFO3_COMMIT_MASTER;
+ }
+
+ if (policy.durableDelete)
+ {
+ writeAttr |= Command.INFO2_DURABLE_DELETE;
+ }
+
+ dataOffset += 8;
+
+ // Write all header data except total size which must be written last.
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)writeAttr;
+ dataBuffer[dataOffset++] = (byte)infoAttr;
+ dataBuffer[dataOffset++] = 0;
+ dataBuffer[dataOffset++] = 0; // clear the result code
+ dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
+ }
+
+ /// <summary>
+ /// Header write for operate command.
+ /// </summary>
+ private void WriteHeaderReadWrite
+ (
+ WritePolicy policy,
+ OperateArgs args,
+ int fieldCount
+ )
+ {
+ // Set flags.
+ int generation = 0;
+ int ttl = args.hasWrite ? policy.expiration : policy.readTouchTtlPercent;
+ int readAttr = args.readAttr;
+ int writeAttr = args.writeAttr;
+ int infoAttr = 0;
+ int operationCount = args.operations.Length;
+
+ switch (policy.recordExistsAction)
+ {
+ case RecordExistsAction.UPDATE:
+ break;
+ case RecordExistsAction.UPDATE_ONLY:
+ infoAttr |= Command.INFO3_UPDATE_ONLY;
+ break;
+ case RecordExistsAction.REPLACE:
+ infoAttr |= Command.INFO3_CREATE_OR_REPLACE;
+ break;
+ case RecordExistsAction.REPLACE_ONLY:
+ infoAttr |= Command.INFO3_REPLACE_ONLY;
+ break;
+ case RecordExistsAction.CREATE_ONLY:
+ writeAttr |= Command.INFO2_CREATE_ONLY;
+ break;
+ }
+
+ switch (policy.generationPolicy)
+ {
+ case GenerationPolicy.NONE:
+ break;
+ case GenerationPolicy.EXPECT_GEN_EQUAL:
+ generation = policy.generation;
+ writeAttr |= Command.INFO2_GENERATION;
+ break;
+ case GenerationPolicy.EXPECT_GEN_GT:
+ generation = policy.generation;
+ writeAttr |= Command.INFO2_GENERATION_GT;
+ break;
+ }
+
+ if (policy.commitLevel == CommitLevel.COMMIT_MASTER)
+ {
+ infoAttr |= Command.INFO3_COMMIT_MASTER;
+ }
+
+ if (policy.durableDelete)
+ {
+ writeAttr |= Command.INFO2_DURABLE_DELETE;
+ }
+ switch (policy.readModeSC)
+ {
+ case ReadModeSC.SESSION:
+ break;
+ case ReadModeSC.LINEARIZE:
+ infoAttr |= Command.INFO3_SC_READ_TYPE;
+ break;
+ case ReadModeSC.ALLOW_REPLICA:
+ infoAttr |= Command.INFO3_SC_READ_RELAX;
+ break;
+ case ReadModeSC.ALLOW_UNAVAILABLE:
+ infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX;
+ break;
+ }
+
+ if (policy.readModeAP == ReadModeAP.ALL)
+ {
+ readAttr |= Command.INFO1_READ_MODE_AP_ALL;
+ }
+
+ if (policy.compress)
+ {
+ readAttr |= Command.INFO1_COMPRESS_RESPONSE;
+ }
+
+ dataOffset += 8;
+
+ // Write all header data except total size which must be written last.
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
+ dataBuffer[dataOffset++] = (byte)readAttr;
+ dataBuffer[dataOffset++] = (byte)writeAttr;
+ dataBuffer[dataOffset++] = (byte)infoAttr;
+ dataBuffer[dataOffset++] = 0; // unused
+ dataBuffer[dataOffset++] = 0; // clear the result code
+ dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)ttl, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
+ }
+
+ /// <summary>
+ /// Header write for read commands.
+ /// </summary>
+ private void WriteHeaderRead
+ (
+ Policy policy,
+ int timeout,
+ int readAttr,
+ int writeAttr,
+ int infoAttr,
+ int fieldCount,
+ int operationCount
+ )
+ {
+ switch (policy.readModeSC)
+ {
+ case ReadModeSC.SESSION:
+ break;
+ case ReadModeSC.LINEARIZE:
+ infoAttr |= Command.INFO3_SC_READ_TYPE;
+ break;
+ case ReadModeSC.ALLOW_REPLICA:
+ infoAttr |= Command.INFO3_SC_READ_RELAX;
+ break;
+ case ReadModeSC.ALLOW_UNAVAILABLE:
+ infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX;
+ break;
+ }
+
+ if (policy.readModeAP == ReadModeAP.ALL)
+ {
+ readAttr |= Command.INFO1_READ_MODE_AP_ALL;
+ }
+
+ if (policy.compress)
+ {
+ readAttr |= Command.INFO1_COMPRESS_RESPONSE;
+ }
+
+ dataOffset += 8;
+
+ // Write all header data except total size which must be written last.
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
+ dataBuffer[dataOffset++] = (byte)readAttr;
+ dataBuffer[dataOffset++] = (byte)writeAttr;
+ dataBuffer[dataOffset++] = (byte)infoAttr;
+
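+ // Zero out the unused byte, result code and generation(4).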
+ for (int i = 0; i < 6; i++)
+ {
+ dataBuffer[dataOffset++] = 0;
+ }
+ dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
+ }
+
+ /// <summary>
+ /// Header write for read header commands.
+ /// </summary>
+ private void WriteHeaderReadHeader(Policy policy, int readAttr, int fieldCount, int operationCount)
+ {
+ int infoAttr = 0;
+
+ switch (policy.readModeSC)
+ {
+ case ReadModeSC.SESSION:
+ break;
+ case ReadModeSC.LINEARIZE:
+ infoAttr |= Command.INFO3_SC_READ_TYPE;
+ break;
+ case ReadModeSC.ALLOW_REPLICA:
+ infoAttr |= Command.INFO3_SC_READ_RELAX;
+ break;
+ case ReadModeSC.ALLOW_UNAVAILABLE:
+ infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX;
+ break;
+ }
+
+ if (policy.readModeAP == ReadModeAP.ALL)
+ {
+ readAttr |= Command.INFO1_READ_MODE_AP_ALL;
+ }
+
+ dataOffset += 8;
+
+ // Write all header data except total size which must be written last.
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
+ dataBuffer[dataOffset++] = (byte)readAttr;
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)infoAttr;
+
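+ // Zero out the unused byte, result code and generation(4).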
+ for (int i = 0; i < 6; i++)
+ {
+ dataBuffer[dataOffset++] = 0;
+ }
+ dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
+ }
+
+ /// <summary>
+ /// Header write for batch single commands.
+ /// </summary>
+ private void WriteKeyAttr(
+ Policy policy,
+ Key key,
+ BatchAttr attr,
+ Expression filterExp,
+ int fieldCount,
+ int operationCount
+ )
+ {
+ dataOffset += 8;
+
+ // Write all header data except total size which must be written last.
+ dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length.
+ dataBuffer[dataOffset++] = (byte)attr.readAttr;
+ dataBuffer[dataOffset++] = (byte)attr.writeAttr;
+ dataBuffer[dataOffset++] = (byte)attr.infoAttr;
+ dataBuffer[dataOffset++] = 0; // unused
+ dataBuffer[dataOffset++] = 0; // clear the result code
+ dataOffset += ByteUtil.IntToBytes((uint)attr.generation, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset);
+ dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset);
+
+ WriteKey(policy, key, attr.hasWrite);
+
+ filterExp?.Write(this);
+ }
+
+ private void WriteKey(Policy policy, Key key, bool sendDeadline)
+ {
+ WriteKey(key);
+ WriteTxn(policy.Txn, sendDeadline);
+
+ if (policy.sendKey)
+ {
+ WriteField(key.userKey, FieldType.KEY);
+ }
+ }
+
+ private void WriteKey(Key key)
+ {
+ // Write key into dataBuffer.
+ if (key.ns != null)
+ {
+ WriteField(key.ns, FieldType.NAMESPACE);
+ }
+
+ if (key.setName != null)
+ {
+ WriteField(key.setName, FieldType.TABLE);
+ }
+
+ WriteField(key.digest, FieldType.DIGEST_RIPE);
+ }
+
+ private int WriteReadOnlyOperations(Operation[] ops, int readAttr)
+ {
+ bool readBin = false;
+ bool readHeader = false;
+
+ foreach (Operation op in ops)
+ {
+ switch (op.type)
+ {
+ case Operation.Type.READ:
+ // Read all bins if no bin is specified.
+ if (op.binName == null)
+ {
+ readAttr |= Command.INFO1_GET_ALL;
+ }
+ readBin = true;
+ break;
+
+ case Operation.Type.READ_HEADER:
+ readHeader = true;
+ break;
+
+ default:
+ break;
+ }
+ WriteOperation(op);
+ }
+
+ if (readHeader && !readBin)
+ {
+ readAttr |= Command.INFO1_NOBINDATA;
+ }
+ return readAttr;
+ }
+
+ private void WriteOperation(Bin bin, Operation.Type operationType)
+ {
+ int nameLength = ByteUtil.StringToUtf8(bin.name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE);
+ int valueLength = bin.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength);
+
+ ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType);
+ dataBuffer[dataOffset++] = (byte)bin.value.Type;
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)nameLength;
+ dataOffset += nameLength + valueLength;
+ }
+
+ private void WriteOperation(Operation operation)
+ {
+ int nameLength = ByteUtil.StringToUtf8(operation.binName, dataBuffer, dataOffset + OPERATION_HEADER_SIZE);
+ int valueLength = operation.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength);
+
+ ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = Operation.GetProtocolType(operation.type);
+ dataBuffer[dataOffset++] = (byte)operation.value.Type;
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)nameLength;
+ dataOffset += nameLength + valueLength;
+ }
+
+ private void WriteOperation(string name, Operation.Type operationType)
+ {
+ int nameLength = ByteUtil.StringToUtf8(name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE);
+
+ ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType);
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)nameLength;
+ dataOffset += nameLength;
+ }
+
+ private void WriteOperation(Operation.Type operationType)
+ {
+ ByteUtil.IntToBytes(4, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType);
+ dataBuffer[dataOffset++] = 0;
+ dataBuffer[dataOffset++] = 0;
+ dataBuffer[dataOffset++] = 0;
+ }
+
+ private int SizeTxn(Key key, Txn txn, bool hasWrite)
+ {
+ int fieldCount = 0;
+
+ if (txn != null)
+ {
+ dataOffset += 8 + FIELD_HEADER_SIZE;
+ fieldCount++;
+
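+ // Cache the record's read version. WriteTxn() writes it later.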
+ Version = txn.GetReadVersion(key);
+
+ if (Version.HasValue)
+ {
+ dataOffset += 7 + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (hasWrite && txn.Deadline != 0)
+ {
+ dataOffset += 4 + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+ }
+ return fieldCount;
+ }
+
+ private void WriteTxn(Txn txn, bool sendDeadline)
+ {
+ if (txn != null)
+ {
+ WriteFieldLE(txn.Id, FieldType.MRT_ID);
+
+ if (Version.HasValue)
+ {
+ WriteFieldVersion(Version.Value);
+ }
+
+ if (sendDeadline && txn.Deadline != 0)
+ {
+ WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE);
+ }
+ }
+ }
+
+ private void WriteFieldVersion(long ver)
+ {
+ WriteFieldHeader(7, FieldType.RECORD_VERSION);
+ ByteUtil.LongToVersionBytes(ver, dataBuffer, dataOffset);
+ dataOffset += 7;
+ }
+
+ private void WriteField(Value value, int type)
+ {
+ int offset = dataOffset + FIELD_HEADER_SIZE;
+ dataBuffer[offset++] = (byte)value.Type;
+ int len = value.Write(dataBuffer, offset) + 1;
+ WriteFieldHeader(len, type);
+ dataOffset += len;
+ }
+
+ private void WriteField(string str, int type)
+ {
+ int len = ByteUtil.StringToUtf8(str, dataBuffer, dataOffset + FIELD_HEADER_SIZE);
+ WriteFieldHeader(len, type);
+ dataOffset += len;
+ }
+
+ private void WriteField(byte[] bytes, int type)
+ {
+ Array.Copy(bytes, 0, dataBuffer, dataOffset + FIELD_HEADER_SIZE, bytes.Length);
+ WriteFieldHeader(bytes.Length, type);
+ dataOffset += bytes.Length;
+ }
+
+ private void WriteField(int val, int type)
+ {
+ WriteFieldHeader(4, type);
+ ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset);
+ dataOffset += 4;
+ }
+
+ private void WriteFieldLE(int val, int type)
+ {
+ WriteFieldHeader(4, type);
+ ByteUtil.IntToLittleBytes((uint)val, dataBuffer, dataOffset);
+ dataOffset += 4;
+ }
+
+ private void WriteField(ulong val, int type)
+ {
+ WriteFieldHeader(8, type);
+ ByteUtil.LongToBytes(val, dataBuffer, dataOffset);
+ dataOffset += 8;
+ }
+
+ private void WriteFieldLE(long val, int type)
+ {
+ WriteFieldHeader(8, type);
+ ByteUtil.LongToLittleBytes((ulong)val, dataBuffer, dataOffset);
+ dataOffset += 8;
+ }
+
+ private void WriteFieldHeader(int size, int type)
+ {
+ ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = (byte)type;
+ }
+
+ internal virtual void WriteExpHeader(int size)
+ {
+ WriteFieldHeader(size, FieldType.FILTER_EXP);
+ }
+
+ private void Begin()
+ {
+ dataOffset = MSG_TOTAL_HEADER_SIZE;
+ }
+
+ private bool SizeBuffer(Policy policy)
+ {
+ if (policy.compress && dataOffset > COMPRESS_THRESHOLD)
+ {
+ // Command will be compressed. First, write the uncompressed command into a
+ // separate dataBuffer and save the normal dataBuffer for the compressed
+ // command. In async mode, the normal dataBuffer comes from a buffer pool
+ // that minimizes memory pinning during socket operations.
+ dataBuffer = new byte[dataOffset];
+ dataOffset = 0;
+ return true;
+ }
+ else
+ {
+ // Command will be uncompressed.
+ SizeBuffer();
+ return false;
+ }
+ }
+
+ private void End(bool compress)
+ {
+ if (!compress)
+ {
+ End();
+ return;
+ }
+
+ // Write proto header.
+ ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48);
+ ByteUtil.LongToBytes(size, dataBuffer, 0);
+
+ byte[] srcBuf = dataBuffer;
+ int srcSize = dataOffset;
+
+ // Increase requested dataBuffer size in case compressed dataBuffer size is
+ // greater than the uncompressed dataBuffer size.
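+ // 16 = compressed proto header (proto(8) + uncompressed size(8)); 100 = slack.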
+ dataOffset += 16 + 100;
+
+ // SizeBuffer() finds a dataBuffer of the requested size, resets dataOffset
+ // to the segment offset and returns the dataBuffer's max size.
+ int trgBufSize = SizeBuffer();
+
+ // Compress to target starting at new dataOffset plus new header.
+ int trgSize = ByteUtil.Compress(srcBuf, srcSize, dataBuffer, dataOffset + 16, trgBufSize - 16) + 16;
+
+ ulong proto = ((ulong)trgSize - 8) | (CL_MSG_VERSION << 56) | (MSG_TYPE_COMPRESSED << 48);
+ ByteUtil.LongToBytes(proto, dataBuffer, dataOffset);
+ ByteUtil.LongToBytes((ulong)srcSize, dataBuffer, dataOffset + 8);
+ SetLength(trgSize);
+ }
+
+ protected internal abstract int SizeBuffer();
+ protected internal abstract void End();
+ protected internal abstract void SetLength(int length);
+
+ //--------------------------------------------------
+ // Response Parsing
+ //--------------------------------------------------
+
+ internal virtual void SkipKey(int fieldCount)
+ {
+ // There can be fields in the response (set name, etc.), but for now, ignore
+ // them. Expose them to the API if needed in the future.
+ for (int i = 0; i < fieldCount; i++)
+ {
+ int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset);
+ dataOffset += 4 + fieldlen;
+ }
+ }
+
+ internal virtual Key ParseKey(int fieldCount, out ulong bval)
+ {
+ byte[] digest = null;
+ string ns = null;
+ string setName = null;
+ Value userKey = null;
+ bval = 0;
+
+ for (int i = 0; i < fieldCount; i++)
+ {
+ int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ int fieldtype = dataBuffer[dataOffset++];
+ int size = fieldlen - 1;
+
+ switch (fieldtype)
+ {
+ case FieldType.DIGEST_RIPE:
+ digest = new byte[size];
+ Array.Copy(dataBuffer, dataOffset, digest, 0, size);
+ break;
+
+ case FieldType.NAMESPACE:
+ ns = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size);
+ break;
+
+ case FieldType.TABLE:
+ setName = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size);
+ break;
+
+ case FieldType.KEY:
+ int type = dataBuffer[dataOffset++];
+ size--;
+ userKey = ByteUtil.BytesToKeyValue((ParticleType)type, dataBuffer, dataOffset, size);
+ break;
+
+ case FieldType.BVAL_ARRAY:
+ bval = (ulong)ByteUtil.LittleBytesToLong(dataBuffer, dataOffset);
+ break;
+ }
+ dataOffset += size;
+ }
+ return new Key(ns, digest, setName, userKey);
+ }
+
+ public long? ParseVersion(int fieldCount)
+ {
+ long? version = null;
+
+ for (int i = 0; i < fieldCount; i++)
+ {
+ int len = ByteUtil.BytesToInt(dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ int type = dataBuffer[dataOffset++];
+ int size = len - 1;
+
+ if (type == FieldType.RECORD_VERSION && size == 7)
+ {
+ version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset);
+ }
+ dataOffset += size;
+ }
+ return version;
+ }
+
+ protected void ParseFields(Txn txn, Key key, bool hasWrite)
+ {
+ if (txn == null)
+ {
+ SkipFields(fieldCount);
+ return;
+ }
+
+ long? version = null;
+
+ for (int i = 0; i < fieldCount; i++)
+ {
+ int len = ByteUtil.BytesToInt(dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ int type = dataBuffer[dataOffset++];
+ int size = len - 1;
+
+ if (type == FieldType.RECORD_VERSION)
+ {
+ if (size == 7)
+ {
+ version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset);
+ }
+ else
+ {
+ throw new AerospikeException("Record version field has invalid size: " + size);
+ }
+ }
+ dataOffset += size;
+ }
+
+ if (hasWrite)
+ {
+ txn.OnWrite(key, version, resultCode);
+ }
+ else
+ {
+ txn.OnRead(key, version);
+ }
+ }
+
+ protected void ParseTxnDeadline(Txn txn)
+ {
+ for (int i = 0; i < fieldCount; i++)
+ {
+ int len = ByteUtil.BytesToInt(dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ int type = dataBuffer[dataOffset++];
+ int size = len - 1;
+
+ if (type == FieldType.MRT_DEADLINE)
+ {
+ int deadline = ByteUtil.LittleBytesToInt(dataBuffer, dataOffset);
+ txn.Deadline = deadline;
+ }
+ dataOffset += size;
+ }
+ }
+
+ protected void SkipFields(int fieldCount)
+ {
+ // There can be fields in the response (setname etc).
+ // But for now, ignore them. Expose them to the API if needed in the future.
+ for (int i = 0; i < fieldCount; i++)
+ {
+ int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset);
+ dataOffset += 4 + fieldlen;
+ }
+ }
+
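+ // A batch write is in doubt when the command was sent more than once; an earlier attempt may have been applied.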
+ public static bool BatchInDoubt(bool isWrite, int commandSentCounter)
+ {
+ return isWrite && commandSentCounter > 1;
+ }
+
+ public interface BatchOffsets
+ {
+ int Size();
+ int Get(int i);
+ }
+
+ private class BatchOffsetsNative : BatchOffsets
+ {
+ private int size;
+ private int[] offsets;
+
+ public BatchOffsetsNative(BatchNode batch)
+ {
+ this.size = batch.offsetsSize;
+ this.offsets = batch.offsets;
+ }
+
+ public int Size()
+ {
+ return size;
+ }
+
+ public int Get(int i)
+ {
+ return offsets[i];
+ }
+ }
+ }
+}
+#pragma warning restore 0618
diff --git a/AerospikeClient/Command/DeleteCommand.cs b/AerospikeClient/Command/DeleteCommand.cs
index de56574e..ce1a005e 100644
--- a/AerospikeClient/Command/DeleteCommand.cs
+++ b/AerospikeClient/Command/DeleteCommand.cs
@@ -1,96 +1,71 @@
-/*
- * Copyright 2012-2024 Aerospike, Inc.
- *
- * Portions may be licensed to Aerospike, Inc. under one or more contributor
- * license agreements.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-namespace Aerospike.Client
-{
- public sealed class DeleteCommand : SyncCommand
- {
- private readonly WritePolicy writePolicy;
- private readonly Key key;
- private readonly Partition partition;
- private bool existed;
-
- public DeleteCommand(Cluster cluster, WritePolicy writePolicy, Key key)
- : base(cluster, writePolicy)
- {
- this.writePolicy = writePolicy;
- this.key = key;
- this.partition = Partition.Write(cluster, writePolicy, key);
- cluster.AddTran();
- }
-
- protected internal override bool IsWrite()
- {
- return true;
- }
-
- protected internal override Node GetNode()
- {
- return partition.GetNodeWrite(cluster);
- }
-
- protected override Latency.LatencyType GetLatencyType()
- {
- return Latency.LatencyType.WRITE;
- }
-
- protected internal override void WriteBuffer()
- {
- SetDelete(writePolicy, key);
- }
-
- protected internal override void ParseResult(Connection conn)
- {
- ParseHeader(conn);
-
- if (resultCode == 0)
- {
- existed = true;
- return;
- }
-
- if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR)
- {
- existed = false;
- return;
- }
-
- if (resultCode == ResultCode.FILTERED_OUT)
- {
- if (writePolicy.failOnFilteredOut)
- {
- throw new AerospikeException(resultCode);
- }
- existed = true;
- return;
- }
-
- throw new AerospikeException(resultCode);
- }
-
- protected internal override bool PrepareRetry(bool timeout)
- {
- partition.PrepareRetryWrite(timeout);
- return true;
- }
-
- public bool Existed()
- {
- return existed;
- }
- }
-}
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+using System;
+
+namespace Aerospike.Client
+{
+ public sealed class DeleteCommand : SyncWriteCommand
+ {
+ private bool existed;
+
+ public DeleteCommand(Cluster cluster, WritePolicy writePolicy, Key key)
+ : base(cluster, writePolicy, key)
+ {
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetDelete(writePolicy, key);
+ }
+
+ protected internal override void ParseResult(Connection conn)
+ {
+ ParseHeader(conn);
+ ParseFields(policy.Txn, key, true);
+
+ if (resultCode == ResultCode.OK)
+ {
+ existed = true;
+ return;
+ }
+
+ if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR)
+ {
+ existed = false;
+ return;
+ }
+
+ if (resultCode == ResultCode.FILTERED_OUT)
+ {
+ if (writePolicy.failOnFilteredOut)
+ {
+ throw new AerospikeException(resultCode);
+ }
+ existed = true;
+ return;
+ }
+
+ throw new AerospikeException(resultCode);
+ }
+
+ public bool Existed()
+ {
+ return existed;
+ }
+ }
+}
diff --git a/AerospikeClient/Command/ExecuteCommand.cs b/AerospikeClient/Command/ExecuteCommand.cs
index a92bfe3b..6637d24a 100644
--- a/AerospikeClient/Command/ExecuteCommand.cs
+++ b/AerospikeClient/Command/ExecuteCommand.cs
@@ -15,14 +15,16 @@
* the License.
*/
+using Aerospike.Client;
+
namespace Aerospike.Client
{
- public sealed class ExecuteCommand : ReadCommand
+ public sealed class ExecuteCommand : SyncWriteCommand
{
- private readonly WritePolicy writePolicy;
private readonly string packageName;
private readonly string functionName;
private readonly Value[] args;
+ public Record Record { get; private set; }
public ExecuteCommand
(
@@ -32,43 +34,73 @@ public ExecuteCommand
string packageName,
string functionName,
Value[] args
- ) : base(cluster, writePolicy, key, Partition.Write(cluster, writePolicy, key), false)
+ ) : base(cluster, writePolicy, key)
{
- this.writePolicy = writePolicy;
this.packageName = packageName;
this.functionName = functionName;
this.args = args;
}
- protected internal override bool IsWrite()
+ protected internal override void WriteBuffer()
{
- return true;
+ SetUdf(writePolicy, key, packageName, functionName, args);
}
- protected internal override Node GetNode()
+ protected internal override void ParseResult(Connection conn)
{
- return partition.GetNodeWrite(cluster);
- }
+ ParseHeader(conn);
+ ParseFields(policy.Txn, key, true);
- protected override Latency.LatencyType GetLatencyType()
- {
- return Latency.LatencyType.WRITE;
- }
+ if (resultCode == ResultCode.OK)
+ {
+ Record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false);
+ return;
+ }
- protected internal override void WriteBuffer()
- {
- SetUdf(writePolicy, key, packageName, functionName, args);
- }
+ if (resultCode == ResultCode.UDF_BAD_RESPONSE)
+ {
+ Record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false);
+ HandleUdfError(resultCode);
+ return;
+ }
+
+ if (resultCode == ResultCode.FILTERED_OUT)
+ {
+ if (policy.failOnFilteredOut)
+ {
+ throw new AerospikeException(resultCode);
+ }
+ return;
+ }
- protected internal override void HandleNotFound(int resultCode)
- {
throw new AerospikeException(resultCode);
}
- protected internal override bool PrepareRetry(bool timeout)
+ private void HandleUdfError(int resultCode)
{
- partition.PrepareRetryWrite(timeout);
- return true;
+ Record.bins.TryGetValue("FAILURE", out object obj);
+ string ret = (string)obj;
+
+ if (ret == null)
+ {
+ throw new AerospikeException(resultCode);
+ }
+
+ string message;
+ int code;
+
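+ // The FAILURE bin holds four colon-separated parts: parts 0-1 locate the error, part 2 is the numeric result code, part 3 is the message.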
+ try
+ {
+ string[] list = ret.Split(":");
+ code = Convert.ToInt32(list[2].Trim());
+ message = list[0] + ':' + list[1] + ' ' + list[3];
+ }
+ catch (Exception e)
+ {
+ // Use generic exception if parse error occurs.
+ throw new AerospikeException(resultCode, ret, e);
+ }
+
+ throw new AerospikeException(code, message);
}
}
}
diff --git a/AerospikeClient/Command/ExistsCommand.cs b/AerospikeClient/Command/ExistsCommand.cs
index 64b734d6..24158b0c 100644
--- a/AerospikeClient/Command/ExistsCommand.cs
+++ b/AerospikeClient/Command/ExistsCommand.cs
@@ -17,28 +17,13 @@
namespace Aerospike.Client
{
- public sealed class ExistsCommand : SyncCommand
+ public sealed class ExistsCommand : SyncReadCommand
{
- private readonly Key key;
- private readonly Partition partition;
private bool exists;
public ExistsCommand(Cluster cluster, Policy policy, Key key)
- : base(cluster, policy)
+ : base(cluster, policy, key)
{
- this.key = key;
- this.partition = Partition.Read(cluster, policy, key);
- cluster.AddTran();
- }
-
- protected internal override Node GetNode()
- {
- return partition.GetNodeRead(cluster);
- }
-
- protected override Latency.LatencyType GetLatencyType()
- {
- return Latency.LatencyType.READ;
}
protected internal override void WriteBuffer()
@@ -49,8 +34,9 @@ protected internal override void WriteBuffer()
protected internal override void ParseResult(Connection conn)
{
ParseHeader(conn);
+ ParseFields(policy.Txn, key, false);
- if (resultCode == 0)
+ if (resultCode == ResultCode.OK)
{
exists = true;
return;
@@ -75,12 +61,6 @@ protected internal override void ParseResult(Connection conn)
throw new AerospikeException(resultCode);
}
- protected internal override bool PrepareRetry(bool timeout)
- {
- partition.PrepareRetryRead(timeout);
- return true;
- }
-
public bool Exists()
{
return exists;
diff --git a/AerospikeClient/Command/FieldType.cs b/AerospikeClient/Command/FieldType.cs
index 75cd5fa3..7b4b01d0 100644
--- a/AerospikeClient/Command/FieldType.cs
+++ b/AerospikeClient/Command/FieldType.cs
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2022 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements.
@@ -21,8 +21,11 @@ public sealed class FieldType
public const int NAMESPACE = 0;
public const int TABLE = 1;
public const int KEY = 2;
+ public const int RECORD_VERSION = 3;
public const int DIGEST_RIPE = 4;
- public const int TRAN_ID = 7; // user supplied transaction id, which is simply passed back
+ public const int MRT_ID = 5;
+ public const int MRT_DEADLINE = 6;
+ public const int QUERY_ID = 7;
public const int SOCKET_TIMEOUT = 9;
public const int RECORDS_PER_SECOND = 10;
public const int PID_ARRAY = 11;
diff --git a/AerospikeClient/Command/MultiCommand.cs b/AerospikeClient/Command/MultiCommand.cs
index c95ceabb..f2178cfe 100644
--- a/AerospikeClient/Command/MultiCommand.cs
+++ b/AerospikeClient/Command/MultiCommand.cs
@@ -27,12 +27,7 @@ public abstract class MultiCommand : SyncCommand
protected internal readonly String ns;
private readonly ulong clusterKey;
protected internal int info3;
- protected internal int resultCode;
- protected internal int generation;
- protected internal int expiration;
protected internal int batchIndex;
- protected internal int fieldCount;
- protected internal int opCount;
protected internal readonly bool isOperation;
private readonly bool first;
protected internal volatile bool valid = true;
diff --git a/AerospikeClient/Command/OperateArgs.cs b/AerospikeClient/Command/OperateArgs.cs
index c13edab7..bdf42800 100644
--- a/AerospikeClient/Command/OperateArgs.cs
+++ b/AerospikeClient/Command/OperateArgs.cs
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements.
@@ -30,7 +30,6 @@ public OperateArgs
WritePolicy policy,
WritePolicy writeDefault,
WritePolicy readDefault,
- Key key,
Operation[] operations
)
{
@@ -131,17 +130,5 @@ Operation[] operations
}
writeAttr = wattr;
}
-
- public Partition GetPartition(Cluster cluster, Key key)
- {
- if (hasWrite)
- {
- return Partition.Write(cluster, writePolicy, key);
- }
- else
- {
- return Partition.Read(cluster, writePolicy, key);
- }
- }
}
}
diff --git a/AerospikeClient/Command/OperateCommandRead.cs b/AerospikeClient/Command/OperateCommandRead.cs
new file mode 100644
index 00000000..f20f2ec7
--- /dev/null
+++ b/AerospikeClient/Command/OperateCommandRead.cs
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ public sealed class OperateCommandRead : ReadCommand
+ {
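+ // Operate commands containing only read operations run as plain reads: read replica selection and read retry.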
+ private readonly OperateArgs args;
+
+ public OperateCommandRead(Cluster cluster, Key key, OperateArgs args)
+ : base(cluster, args.writePolicy, key, true)
+ {
+ this.args = args;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetOperate(args.writePolicy, key, args);
+ }
+ }
+}
diff --git a/AerospikeClient/Command/OperateCommandWrite.cs b/AerospikeClient/Command/OperateCommandWrite.cs
new file mode 100644
index 00000000..2eec1e56
--- /dev/null
+++ b/AerospikeClient/Command/OperateCommandWrite.cs
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+using Aerospike.Client;
+
+namespace Aerospike.Client
+{
+ public sealed class OperateCommandWrite : SyncWriteCommand
+ {
+ private readonly OperateArgs args;
+ public Record Record { get; private set; }
+
+ public OperateCommandWrite(Cluster cluster, Key key, OperateArgs args)
+ : base(cluster, args.writePolicy, key)
+ {
+ this.args = args;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetOperate(args.writePolicy, key, args);
+ }
+
+ protected internal override void ParseResult(Connection conn)
+ {
+ ParseHeader(conn);
+ ParseFields(policy.Txn, key, true);
+
+ if (resultCode == ResultCode.OK)
+ {
+ Record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, true);
+ return;
+ }
+
+ if (resultCode == ResultCode.FILTERED_OUT)
+ {
+ if (policy.failOnFilteredOut)
+ {
+ throw new AerospikeException(resultCode);
+ }
+ return;
+ }
+
+ throw new AerospikeException(resultCode);
+ }
+
+ }
+}
diff --git a/AerospikeClient/Command/ReadCommand.cs b/AerospikeClient/Command/ReadCommand.cs
index 8dfc8a57..28d40570 100644
--- a/AerospikeClient/Command/ReadCommand.cs
+++ b/AerospikeClient/Command/ReadCommand.cs
@@ -17,52 +17,31 @@
namespace Aerospike.Client
{
- public class ReadCommand : SyncCommand
+ public class ReadCommand : SyncReadCommand
{
- protected readonly Key key;
- protected readonly Partition partition;
private readonly string[] binNames;
private readonly bool isOperation;
private Record record;
public ReadCommand(Cluster cluster, Policy policy, Key key)
- : base(cluster, policy)
+ : base(cluster, policy, key)
{
- this.key = key;
this.binNames = null;
- this.partition = Partition.Read(cluster, policy, key);
this.isOperation = false;
- cluster.AddTran();
}
public ReadCommand(Cluster cluster, Policy policy, Key key, String[] binNames)
- : base(cluster, policy)
+ : base(cluster, policy, key)
{
- this.key = key;
this.binNames = binNames;
- this.partition = Partition.Read(cluster, policy, key);
this.isOperation = false;
- cluster.AddTran();
}
- public ReadCommand(Cluster cluster, Policy policy, Key key, Partition partition, bool isOperation)
- : base(cluster, policy)
+ public ReadCommand(Cluster cluster, Policy policy, Key key, bool isOperation)
+ : base(cluster, policy, key)
{
- this.key = key;
this.binNames = null;
- this.partition = partition;
this.isOperation = isOperation;
- cluster.AddTran();
- }
-
- protected internal override Node GetNode()
- {
- return partition.GetNodeRead(cluster);
- }
-
- protected override Latency.LatencyType GetLatencyType()
- {
- return Latency.LatencyType.READ;
}
protected internal override void WriteBuffer()
@@ -73,23 +52,16 @@ protected internal override void WriteBuffer()
protected internal override void ParseResult(Connection conn)
{
ParseHeader(conn);
+ ParseFields(policy.Txn, key, false);
- if (resultCode == 0)
+ if (resultCode == ResultCode.OK)
{
- if (opCount == 0)
- {
- // Bin data was not returned.
- record = new Record(null, generation, expiration);
- return;
- }
- SkipKey(fieldCount);
- record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation);
+ this.record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation);
return;
}
if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR)
{
- HandleNotFound(resultCode);
return;
}
@@ -102,56 +74,8 @@ record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, ge
return;
}
- if (resultCode == ResultCode.UDF_BAD_RESPONSE)
- {
- SkipKey(fieldCount);
- record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation);
- HandleUdfError(resultCode);
- return;
- }
-
throw new AerospikeException(resultCode);
}
-
- protected internal override bool PrepareRetry(bool timeout)
- {
- partition.PrepareRetryRead(timeout);
- return true;
- }
-
- protected internal virtual void HandleNotFound(int resultCode)
- {
- // Do nothing in default case. Record will be null.
- }
-
- private void HandleUdfError(int resultCode)
- {
- object obj;
-
- if (!record.bins.TryGetValue("FAILURE", out obj))
- {
- throw new AerospikeException(resultCode);
- }
-
- string ret = (string)obj;
- string message;
- int code;
-
- try
- {
- string[] list = ret.Split(':');
- code = Convert.ToInt32(list[2].Trim());
- message = list[0] + ':' + list[1] + ' ' + list[3];
- }
- catch (Exception e)
- {
- // Use generic exception if parse error occurs.
- throw new AerospikeException(resultCode, ret, e);
- }
-
- throw new AerospikeException(code, message);
- }
-
public Record Record
{
get
diff --git a/AerospikeClient/Command/ReadHeaderCommand.cs b/AerospikeClient/Command/ReadHeaderCommand.cs
index 6fde67f9..dfeceb15 100644
--- a/AerospikeClient/Command/ReadHeaderCommand.cs
+++ b/AerospikeClient/Command/ReadHeaderCommand.cs
@@ -17,28 +17,13 @@
namespace Aerospike.Client
{
- public sealed class ReadHeaderCommand : SyncCommand
+ public sealed class ReadHeaderCommand : SyncReadCommand
{
- private readonly Key key;
- private readonly Partition partition;
private Record record;
public ReadHeaderCommand(Cluster cluster, Policy policy, Key key)
- : base(cluster, policy)
+ : base(cluster, policy, key)
{
- this.key = key;
- this.partition = Partition.Read(cluster, policy, key);
- cluster.AddTran();
- }
-
- protected internal override Node GetNode()
- {
- return partition.GetNodeRead(cluster);
- }
-
- protected override Latency.LatencyType GetLatencyType()
- {
- return Latency.LatencyType.READ;
}
protected internal override void WriteBuffer()
@@ -49,8 +34,8 @@ protected internal override void WriteBuffer()
protected internal override void ParseResult(Connection conn)
{
ParseHeader(conn);
-
- if (resultCode == 0)
+ ParseFields(policy.Txn, key, false);
+ if (resultCode == ResultCode.OK)
{
record = new Record(null, generation, expiration);
return;
@@ -73,12 +58,6 @@ protected internal override void ParseResult(Connection conn)
throw new AerospikeException(resultCode);
}
- protected internal override bool PrepareRetry(bool timeout)
- {
- partition.PrepareRetryRead(timeout);
- return true;
- }
-
public Record Record
{
get
diff --git a/AerospikeClient/Command/ScanExecutor.cs b/AerospikeClient/Command/ScanExecutor.cs
index 8380d849..a50db933 100644
--- a/AerospikeClient/Command/ScanExecutor.cs
+++ b/AerospikeClient/Command/ScanExecutor.cs
@@ -24,7 +24,7 @@ public sealed class ScanExecutor
{
public static void ScanPartitions(Cluster cluster, ScanPolicy policy, string ns, string setName, string[] binNames, ScanCallback callback, PartitionTracker tracker)
{
- cluster.AddTran();
+ cluster.AddCommandCount();
while (true)
{
diff --git a/AerospikeClient/Command/SyncCommand.cs b/AerospikeClient/Command/SyncCommand.cs
index 356b92eb..08cca1ac 100644
--- a/AerospikeClient/Command/SyncCommand.cs
+++ b/AerospikeClient/Command/SyncCommand.cs
@@ -1,439 +1,442 @@
-/*
- * Copyright 2012-2024 Aerospike, Inc.
- *
- * Portions may be licensed to Aerospike, Inc. under one or more contributor
- * license agreements.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-using System.Net.Sockets;
-using static Aerospike.Client.Latency;
-
-namespace Aerospike.Client
-{
- public abstract class SyncCommand : Command
- {
- protected readonly Cluster cluster;
- protected readonly Policy policy;
- internal int iteration = 1;
- internal int commandSentCounter;
- internal DateTime deadline;
- protected int resultCode;
- protected int generation;
- protected int expiration;
- protected int fieldCount;
- protected int opCount;
-
- ///
- /// Default constructor.
- ///
- public SyncCommand(Cluster cluster, Policy policy)
- : base(policy.socketTimeout, policy.totalTimeout, policy.maxRetries)
- {
- this.cluster = cluster;
- this.policy = policy;
- this.deadline = DateTime.MinValue;
- }
-
- ///
- /// Scan/Query constructor.
- ///
- public SyncCommand(Cluster cluster, Policy policy, int socketTimeout, int totalTimeout)
- : base(socketTimeout, totalTimeout, 0)
- {
- this.cluster = cluster;
- this.policy = policy;
- this.deadline = DateTime.MinValue;
- }
-
- public virtual void Execute()
- {
- if (totalTimeout > 0)
- {
- deadline = DateTime.UtcNow.AddMilliseconds(totalTimeout);
- }
- ExecuteCommand();
- }
-
- public void ExecuteCommand()
- {
- Node node;
- AerospikeException exception = null;
- ValueStopwatch metricsWatch = new();
- LatencyType latencyType = cluster.MetricsEnabled ? GetLatencyType() : LatencyType.NONE;
- bool isClientTimeout;
-
- // Execute command until successful, timed out or maximum iterations have been reached.
- while (true)
- {
- try
- {
- node = GetNode();
- }
- catch (AerospikeException ae)
- {
- ae.Policy = policy;
- ae.Iteration = iteration;
- ae.SetInDoubt(IsWrite(), commandSentCounter);
- throw;
- }
-
- try
- {
- node.ValidateErrorCount();
- if (latencyType != LatencyType.NONE)
- {
- metricsWatch = ValueStopwatch.StartNew();
- }
- Connection conn = node.GetConnection(socketTimeout, policy.TimeoutDelay);
-
- try
- {
- // Set command buffer.
- WriteBuffer();
-
- // Send command.
- conn.Write(dataBuffer, dataOffset);
- commandSentCounter++;
-
- // Parse results.
- ParseResult(conn);
-
- // Put connection back in pool.
- node.PutConnection(conn);
-
- if (latencyType != LatencyType.NONE)
- {
- node.AddLatency(latencyType, metricsWatch.Elapsed.TotalMilliseconds);
- }
-
- // Command has completed successfully. Exit method.
- return;
- }
- catch (AerospikeException ae)
- {
- if (ae.KeepConnection())
- {
- // Put connection back in pool.
- node.PutConnection(conn);
- }
- else
- {
- // Close socket to flush out possible garbage. Do not put back in pool.
- node.CloseConnectionOnError(conn);
- }
-
- if (ae.Result == ResultCode.TIMEOUT)
- {
- // Retry on server timeout.
- exception = new AerospikeException.Timeout(policy, false);
- isClientTimeout = false;
- node.IncrErrorRate();
- node.AddTimeout();
- }
- else if (ae.Result == ResultCode.DEVICE_OVERLOAD)
- {
- // Add to circuit breaker error count and retry.
- exception = ae;
- isClientTimeout = false;
- node.IncrErrorRate();
- node.AddError();
- }
- else
- {
- node.AddError();
- throw;
- }
- }
- catch (Connection.ReadTimeout crt)
- {
- if (policy.TimeoutDelay > 0)
- {
- cluster.RecoverConnection(new ConnectionRecover(conn, node, policy.TimeoutDelay, crt, IsSingle()));
- conn = null;
- }
- else
- {
- node.CloseConnection(conn);
- }
- exception = new AerospikeException.Timeout(policy, true);
- isClientTimeout = true;
- node.AddTimeout();
- }
- catch (SocketException se)
- {
- // Socket errors are considered temporary anomalies.
- // Retry after closing connection.
- node.CloseConnectionOnError(conn);
-
- if (se.SocketErrorCode == SocketError.TimedOut)
- {
- isClientTimeout = true;
- node.AddTimeout();
- }
- else
- {
- exception = new AerospikeException.Connection(se);
- isClientTimeout = false;
- node.AddError();
- }
- }
- catch (IOException ioe)
- {
- // IO errors are considered temporary anomalies. Retry.
- // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration);
- node.CloseConnection(conn);
- exception = new AerospikeException.Connection(ioe);
- isClientTimeout = false;
- node.AddError();
- }
- catch (Exception)
- {
- // All other exceptions are considered fatal. Do not retry.
- // Close socket to flush out possible garbage. Do not put back in pool.
- node.CloseConnectionOnError(conn);
- node.AddError();
- throw;
- }
- }
- catch (SocketException se)
- {
- // This exception might happen after initial connection succeeded, but
- // user login failed with a socket error. Retry.
- if (se.SocketErrorCode == SocketError.TimedOut)
- {
- isClientTimeout = true;
- node.AddTimeout();
- }
- else
- {
- exception = new AerospikeException.Connection(se);
- isClientTimeout = false;
- node.AddError();
- }
- }
- catch (IOException ioe)
- {
- // IO errors are considered temporary anomalies. Retry.
- // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration);
- exception = new AerospikeException.Connection(ioe);
- isClientTimeout = false;
- node.AddError();
- }
- catch (Connection.ReadTimeout)
- {
- // Connection already handled.
- exception = new AerospikeException.Timeout(policy, true);
- isClientTimeout = true;
- node.AddTimeout();
- }
- catch (AerospikeException.Connection ce)
- {
- // Socket connection error has occurred. Retry.
- exception = ce;
- isClientTimeout = false;
- node.AddError();
- }
- catch (AerospikeException.Backoff be)
- {
- // Node is in backoff state. Retry, hopefully on another node.
- exception = be;
- isClientTimeout = false;
- node.AddError();
- }
- catch (AerospikeException ae)
- {
- ae.Node = node;
- ae.Policy = policy;
- ae.Iteration = iteration;
- ae.SetInDoubt(IsWrite(), commandSentCounter);
- node.AddError();
- throw;
- }
- catch (Exception)
- {
- node.AddError();
- throw;
- }
-
- // Check maxRetries.
- if (iteration > maxRetries)
- {
- break;
- }
-
- if (totalTimeout > 0)
- {
- // Check for total timeout.
- long remaining = (long)deadline.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries;
-
- if (remaining <= 0)
- {
- break;
- }
-
- if (remaining < totalTimeout)
- {
- totalTimeout = (int)remaining;
-
- if (socketTimeout > totalTimeout)
- {
- socketTimeout = totalTimeout;
- }
- }
- }
-
- if (!isClientTimeout && policy.sleepBetweenRetries > 0)
- {
- // Sleep before trying again.
- Util.Sleep(policy.sleepBetweenRetries);
- }
-
- iteration++;
-
- if (!PrepareRetry(isClientTimeout || exception.Result != ResultCode.SERVER_NOT_AVAILABLE))
- {
- // Batch may be retried in separate commands.
- if (RetryBatch(cluster, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter))
- {
- // Batch was retried in separate commands. Complete this command.
- return;
- }
- }
-
- cluster.AddRetry();
- }
-
- // Retries have been exhausted. Throw last exception.
- if (isClientTimeout)
- {
- exception = new AerospikeException.Timeout(policy, true);
- }
- exception.Node = node;
- exception.Policy = policy;
- exception.Iteration = iteration;
- exception.SetInDoubt(IsWrite(), commandSentCounter);
- throw exception;
- }
-
- protected internal sealed override int SizeBuffer()
- {
- dataBuffer = ThreadLocalData.GetBuffer();
-
- if (dataOffset > dataBuffer.Length)
- {
- dataBuffer = ThreadLocalData.ResizeBuffer(dataOffset);
- }
- dataOffset = 0;
- return dataBuffer.Length;
- }
-
- protected internal void SizeBuffer(int size)
- {
- if (size > dataBuffer.Length)
- {
- dataBuffer = ThreadLocalData.ResizeBuffer(size);
- }
- }
-
- protected internal sealed override void End()
- {
- // Write total size of message.
- ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48);
- ByteUtil.LongToBytes(size, dataBuffer, 0);
- }
-
- protected internal void ParseHeader(Connection conn)
- {
- // Read header.
- conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER);
-
- long sz = ByteUtil.BytesToLong(dataBuffer, 0);
- int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL);
-
- if (receiveSize <= 0)
- {
- throw new AerospikeException("Invalid receive size: " + receiveSize);
- }
-
- SizeBuffer(receiveSize);
- conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL);
- conn.UpdateLastUsed();
-
- ulong type = (ulong)((sz >> 48) & 0xff);
-
- if (type == Command.AS_MSG_TYPE)
- {
- dataOffset = 5;
- }
- else if (type == Command.MSG_TYPE_COMPRESSED)
- {
- int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0);
- byte[] ubuf = new byte[usize];
-
- ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize);
- dataBuffer = ubuf;
- dataOffset = 13;
- }
- else
- {
- throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE);
- }
-
- this.resultCode = dataBuffer[dataOffset];
- dataOffset++;
- this.generation = ByteUtil.BytesToInt(dataBuffer, dataOffset);
- dataOffset += 4;
- this.expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset);
- dataOffset += 8;
- this.fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset);
- dataOffset += 2;
- this.opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset);
- dataOffset += 2;
- }
-
- protected internal sealed override void SetLength(int length)
- {
- dataOffset = length;
- }
-
- protected internal virtual bool RetryBatch
- (
- Cluster cluster,
- int socketTimeout,
- int totalTimeout,
- DateTime deadline,
- int iteration,
- int commandSentCounter
- )
- {
- // Override this method in batch to regenerate node assignments.
- return false;
- }
-
- protected internal virtual bool IsWrite()
- {
- return false;
- }
-
- protected virtual bool IsSingle()
- {
- return true;
- }
-
- protected internal abstract Node GetNode();
-
- protected abstract LatencyType GetLatencyType();
- protected internal abstract void WriteBuffer();
- protected internal abstract void ParseResult(Connection conn);
- protected internal abstract bool PrepareRetry(bool timeout);
- }
-}
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+using System;
+using System.Net.Sockets;
+using System.Runtime.InteropServices;
+using static Aerospike.Client.Latency;
+
+namespace Aerospike.Client
+{
+ public abstract class SyncCommand : Command
+ {
+ protected readonly Cluster cluster;
+ protected readonly Policy policy;
+ internal int iteration = 1;
+ internal int commandSentCounter;
+ internal DateTime deadline;
+
+ ///
+ /// Default constructor.
+ ///
+ public SyncCommand(Cluster cluster, Policy policy)
+ : base(policy.socketTimeout, policy.totalTimeout, policy.maxRetries)
+ {
+ this.cluster = cluster;
+ this.policy = policy;
+ this.deadline = DateTime.MinValue;
+ }
+
+ ///
+ /// Scan/Query constructor.
+ ///
+ public SyncCommand(Cluster cluster, Policy policy, int socketTimeout, int totalTimeout)
+ : base(socketTimeout, totalTimeout, 0)
+ {
+ this.cluster = cluster;
+ this.policy = policy;
+ this.deadline = DateTime.MinValue;
+ }
+
+ public virtual void Execute()
+ {
+ if (totalTimeout > 0)
+ {
+ deadline = DateTime.UtcNow.AddMilliseconds(totalTimeout);
+ }
+ ExecuteCommand();
+ }
+
+ public void ExecuteCommand()
+ {
+ Node node;
+ AerospikeException exception = null;
+ ValueStopwatch metricsWatch = new();
+ LatencyType latencyType = cluster.MetricsEnabled ? GetLatencyType() : LatencyType.NONE;
+ bool isClientTimeout;
+
+ // Execute command until successful, timed out or maximum iterations have been reached.
+ while (true)
+ {
+ try
+ {
+ node = GetNode();
+ }
+ catch (AerospikeException ae)
+ {
+ ae.Policy = policy;
+ ae.Iteration = iteration;
+ ae.SetInDoubt(IsWrite(), commandSentCounter);
+ throw;
+ }
+
+ try
+ {
+ node.ValidateErrorCount();
+ if (latencyType != LatencyType.NONE)
+ {
+ metricsWatch = ValueStopwatch.StartNew();
+ }
+ Connection conn = node.GetConnection(socketTimeout, policy.TimeoutDelay);
+
+ try
+ {
+ // Set command buffer.
+ WriteBuffer();
+
+ // Send command.
+ conn.Write(dataBuffer, dataOffset);
+ commandSentCounter++;
+
+ // Parse results.
+ ParseResult(conn);
+
+ // Put connection back in pool.
+ node.PutConnection(conn);
+
+ if (latencyType != LatencyType.NONE)
+ {
+ node.AddLatency(latencyType, metricsWatch.Elapsed.TotalMilliseconds);
+ }
+
+ // Command has completed successfully. Exit method.
+ return;
+ }
+ catch (AerospikeException ae)
+ {
+ if (ae.KeepConnection())
+ {
+ // Put connection back in pool.
+ node.PutConnection(conn);
+ }
+ else
+ {
+ // Close socket to flush out possible garbage. Do not put back in pool.
+ node.CloseConnectionOnError(conn);
+ }
+
+ if (ae.Result == ResultCode.TIMEOUT)
+ {
+ // Retry on server timeout.
+ exception = new AerospikeException.Timeout(policy, false);
+ isClientTimeout = false;
+ node.IncrErrorRate();
+ node.AddTimeout();
+ }
+ else if (ae.Result == ResultCode.DEVICE_OVERLOAD)
+ {
+ // Add to circuit breaker error count and retry.
+ exception = ae;
+ isClientTimeout = false;
+ node.IncrErrorRate();
+ node.AddError();
+ }
+ else
+ {
+ node.AddError();
+ throw;
+ }
+ }
+ catch (Connection.ReadTimeout crt)
+ {
+ if (policy.TimeoutDelay > 0)
+ {
+ cluster.RecoverConnection(new ConnectionRecover(conn, node, policy.TimeoutDelay, crt, IsSingle()));
+ conn = null;
+ }
+ else
+ {
+ node.CloseConnection(conn);
+ }
+ exception = new AerospikeException.Timeout(policy, true);
+ isClientTimeout = true;
+ node.AddTimeout();
+ }
+ catch (SocketException se)
+ {
+ // Socket errors are considered temporary anomalies.
+ // Retry after closing connection.
+ node.CloseConnectionOnError(conn);
+
+ if (se.SocketErrorCode == SocketError.TimedOut)
+ {
+ isClientTimeout = true;
+ node.AddTimeout();
+ }
+ else
+ {
+ exception = new AerospikeException.Connection(se);
+ isClientTimeout = false;
+ node.AddError();
+ }
+ }
+ catch (IOException ioe)
+ {
+ // IO errors are considered temporary anomalies. Retry.
+ // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration);
+ node.CloseConnection(conn);
+ exception = new AerospikeException.Connection(ioe);
+ isClientTimeout = false;
+ node.AddError();
+ }
+ catch (Exception)
+ {
+ // All other exceptions are considered fatal. Do not retry.
+ // Close socket to flush out possible garbage. Do not put back in pool.
+ node.CloseConnectionOnError(conn);
+ node.AddError();
+ throw;
+ }
+ }
+ catch (SocketException se)
+ {
+ // This exception might happen after initial connection succeeded, but
+ // user login failed with a socket error. Retry.
+ if (se.SocketErrorCode == SocketError.TimedOut)
+ {
+ isClientTimeout = true;
+ node.AddTimeout();
+ }
+ else
+ {
+ exception = new AerospikeException.Connection(se);
+ isClientTimeout = false;
+ node.AddError();
+ }
+ }
+ catch (IOException ioe)
+ {
+ // IO errors are considered temporary anomalies. Retry.
+ // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration);
+ exception = new AerospikeException.Connection(ioe);
+ isClientTimeout = false;
+ node.AddError();
+ }
+ catch (Connection.ReadTimeout)
+ {
+ // Connection already handled.
+ exception = new AerospikeException.Timeout(policy, true);
+ isClientTimeout = true;
+ node.AddTimeout();
+ }
+ catch (AerospikeException.Connection ce)
+ {
+ // Socket connection error has occurred. Retry.
+ exception = ce;
+ isClientTimeout = false;
+ node.AddError();
+ }
+ catch (AerospikeException.Backoff be)
+ {
+ // Node is in backoff state. Retry, hopefully on another node.
+ exception = be;
+ isClientTimeout = false;
+ node.AddError();
+ }
+ catch (AerospikeException ae)
+ {
+ ae.Node = node;
+ ae.Policy = policy;
+ ae.Iteration = iteration;
+ ae.SetInDoubt(IsWrite(), commandSentCounter);
+ node.AddError();
+ throw;
+ }
+ catch (Exception)
+ {
+ node.AddError();
+ throw;
+ }
+
+ // Check maxRetries.
+ if (iteration > maxRetries)
+ {
+ break;
+ }
+
+ if (totalTimeout > 0)
+ {
+ // Check for total timeout.
+ long remaining = (long)deadline.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries;
+
+ if (remaining <= 0)
+ {
+ break;
+ }
+
+ if (remaining < totalTimeout)
+ {
+ totalTimeout = (int)remaining;
+
+ if (socketTimeout > totalTimeout)
+ {
+ socketTimeout = totalTimeout;
+ }
+ }
+ }
+
+ if (!isClientTimeout && policy.sleepBetweenRetries > 0)
+ {
+ // Sleep before trying again.
+ Util.Sleep(policy.sleepBetweenRetries);
+ }
+
+ iteration++;
+
+ if (!PrepareRetry(isClientTimeout || exception.Result != ResultCode.SERVER_NOT_AVAILABLE))
+ {
+ // Batch may be retried in separate commands.
+ if (RetryBatch(cluster, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter))
+ {
+ // Batch was retried in separate commands. Complete this command.
+ return;
+ }
+ }
+
+ cluster.AddRetry();
+ }
+
+ // Retries have been exhausted. Throw last exception.
+ if (isClientTimeout)
+ {
+ exception = new AerospikeException.Timeout(policy, true);
+ }
+ exception.Node = node;
+ exception.Policy = policy;
+ exception.Iteration = iteration;
+ exception.SetInDoubt(IsWrite(), commandSentCounter);
+ throw exception;
+ }
+
+ protected internal sealed override int SizeBuffer()
+ {
+ dataBuffer = ThreadLocalData.GetBuffer();
+
+ if (dataOffset > dataBuffer.Length)
+ {
+ dataBuffer = ThreadLocalData.ResizeBuffer(dataOffset);
+ }
+ dataOffset = 0;
+ return dataBuffer.Length;
+ }
+
+ protected internal void SizeBuffer(int size)
+ {
+ if (size > dataBuffer.Length)
+ {
+ dataBuffer = ThreadLocalData.ResizeBuffer(size);
+ }
+ }
+
+ protected internal sealed override void End()
+ {
+ // Write total size of message.
+ ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48);
+ ByteUtil.LongToBytes(size, dataBuffer, 0);
+ }
+
+ protected internal void ParseHeader(Connection conn)
+ {
+ // Read header.
+ conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER);
+
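+ // Proto header layout: bits 56-63 version, bits 48-55 message type, bits 0-47 payload size.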
+ long sz = ByteUtil.BytesToLong(dataBuffer, 0);
+ int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL);
+
+ if (receiveSize <= 0)
+ {
+ throw new AerospikeException("Invalid receive size: " + receiveSize);
+ }
+
+ SizeBuffer(receiveSize);
+ conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL);
+ conn.UpdateLastUsed();
+
+ ulong type = (ulong)(sz >> 48) & 0xff;
+
+ if (type == Command.AS_MSG_TYPE)
+ {
+ dataOffset = 5;
+ }
+ else if (type == Command.MSG_TYPE_COMPRESSED)
+ {
+ int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0);
+ byte[] ubuf = new byte[usize];
+
+ ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize);
+ dataBuffer = ubuf;
+ dataOffset = 13;
+ }
+ else
+ {
+ throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE);
+ }
+
+ this.resultCode = dataBuffer[dataOffset] & 0xFF;
+ dataOffset++;
+ this.generation = ByteUtil.BytesToInt(dataBuffer, dataOffset);
+ dataOffset += 4;
+ this.expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset);
+ dataOffset += 8;
+ this.fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset);
+ dataOffset += 2;
+ this.opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset);
+ dataOffset += 2;
+ }
+
+ protected internal sealed override void SetLength(int length)
+ {
+ dataOffset = length;
+ }
+
+ // Do nothing by default. Write commands will override this method.
+ protected internal virtual void OnInDoubt()
+ {
+
+ }
+
+ protected internal virtual bool RetryBatch
+ (
+ Cluster cluster,
+ int socketTimeout,
+ int totalTimeout,
+ DateTime deadline,
+ int iteration,
+ int commandSentCounter
+ )
+ {
+ // Override this method in batch to regenerate node assignments.
+ return false;
+ }
+
+ protected internal virtual bool IsWrite()
+ {
+ return false;
+ }
+
+ protected virtual bool IsSingle()
+ {
+ return true;
+ }
+
+ protected internal abstract Node GetNode();
+
+ protected abstract LatencyType GetLatencyType();
+ protected internal abstract void WriteBuffer();
+ protected internal abstract void ParseResult(Connection conn);
+ protected internal abstract bool PrepareRetry(bool timeout);
+ }
+}
diff --git a/AerospikeClient/Command/SyncReadCommand.cs b/AerospikeClient/Command/SyncReadCommand.cs
new file mode 100644
index 00000000..28d1c24b
--- /dev/null
+++ b/AerospikeClient/Command/SyncReadCommand.cs
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
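+ // Base for single-record read commands: owns the key and read partition, selects the read node, and retries as a read.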
+ public abstract class SyncReadCommand : SyncCommand
+ {
+ protected readonly Key key;
+ private readonly Partition partition;
+
+ public SyncReadCommand(Cluster cluster, Policy policy, Key key)
+ : base(cluster, policy)
+ {
+ this.key = key;
+ this.partition = Partition.Read(cluster, policy, key);
+ cluster.AddCommandCount();
+ }
+
+ protected internal override Node GetNode()
+ {
+ return partition.GetNodeRead(cluster);
+ }
+
+ protected override Latency.LatencyType GetLatencyType()
+ {
+ return Latency.LatencyType.READ;
+ }
+
+ protected internal override bool PrepareRetry(bool timeout)
+ {
+ partition.PrepareRetryRead(timeout);
+ return true;
+ }
+
+ protected internal abstract override void WriteBuffer();
+
+ protected internal abstract override void ParseResult(Connection conn);
+ }
+}
diff --git a/AerospikeClient/Command/OperateCommand.cs b/AerospikeClient/Command/SyncWriteCommand.cs
similarity index 51%
rename from AerospikeClient/Command/OperateCommand.cs
rename to AerospikeClient/Command/SyncWriteCommand.cs
index 45737711..3ba42540 100644
--- a/AerospikeClient/Command/OperateCommand.cs
+++ b/AerospikeClient/Command/SyncWriteCommand.cs
@@ -15,59 +15,56 @@
* the License.
*/
+using System.Runtime.InteropServices;
+
namespace Aerospike.Client
{
- public sealed class OperateCommand : ReadCommand
+ public abstract class SyncWriteCommand : SyncCommand
{
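+ // Base for single-record write commands: owns the key and write partition, selects the master node, retries as a write, and flags the key in doubt on the transaction when the outcome is uncertain.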
- private readonly OperateArgs args;
+ protected readonly WritePolicy writePolicy;
+ protected readonly Key key;
+ private readonly Partition partition;
- public OperateCommand(Cluster cluster, Key key, OperateArgs args)
- : base(cluster, args.writePolicy, key, args.GetPartition(cluster, key), true)
+ public SyncWriteCommand(Cluster cluster, WritePolicy writePolicy, Key key)
+ : base(cluster, writePolicy)
{
- this.args = args;
+ this.writePolicy = writePolicy;
+ this.key = key;
+ this.partition = Partition.Write(cluster, writePolicy, key);
+ cluster.AddCommandCount();
}
protected internal override bool IsWrite()
{
- return args.hasWrite;
+ return true;
}
protected internal override Node GetNode()
{
- return args.hasWrite ? partition.GetNodeWrite(cluster) : partition.GetNodeRead(cluster);
+ return partition.GetNodeWrite(cluster);
}
protected override Latency.LatencyType GetLatencyType()
{
- return args.hasWrite ? Latency.LatencyType.WRITE : Latency.LatencyType.READ;
+ return Latency.LatencyType.WRITE;
}
- protected internal override void WriteBuffer()
+ protected internal override bool PrepareRetry(bool timeout)
{
- SetOperate(args.writePolicy, key, args);
+ partition.PrepareRetryWrite(timeout);
+ return true;
}
- protected internal override void HandleNotFound(int resultCode)
+ protected internal override void OnInDoubt()
{
- // Only throw not found exception for command with write operations.
- // Read-only command operations return a null record.
- if (args.hasWrite)
+ if (writePolicy.Txn != null)
{
- throw new AerospikeException(resultCode);
+ writePolicy.Txn.OnWriteInDoubt(key);
}
}
- protected internal override bool PrepareRetry(bool timeout)
- {
- if (args.hasWrite)
- {
- partition.PrepareRetryWrite(timeout);
- }
- else
- {
- partition.PrepareRetryRead(timeout);
- }
- return true;
- }
+ protected internal abstract override void WriteBuffer();
+
+ protected internal abstract override void ParseResult(Connection conn);
}
}
diff --git a/AerospikeClient/Command/TouchCommand.cs b/AerospikeClient/Command/TouchCommand.cs
index 675f0e2a..1d1913bf 100644
--- a/AerospikeClient/Command/TouchCommand.cs
+++ b/AerospikeClient/Command/TouchCommand.cs
@@ -1,106 +1,74 @@
-/*
- * Copyright 2012-2024 Aerospike, Inc.
- *
- * Portions may be licensed to Aerospike, Inc. under one or more contributor
- * license agreements.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-namespace Aerospike.Client
-{
- public sealed class TouchCommand : SyncCommand
- {
- private readonly WritePolicy writePolicy;
- private readonly Key key;
- private readonly Partition partition;
- private readonly bool failOnNotFound;
- internal bool Touched { get; private set; }
-
- public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key)
- : base(cluster, writePolicy)
- {
- this.writePolicy = writePolicy;
- this.key = key;
- this.partition = Partition.Write(cluster, writePolicy, key);
- this.failOnNotFound = true;
- cluster.AddTran();
- }
-
- public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key, bool failOnNotFound)
- : base(cluster, writePolicy)
- {
- this.writePolicy = writePolicy;
- this.key = key;
- this.partition = Partition.Write(cluster, writePolicy, key);
- this.failOnNotFound = failOnNotFound;
- cluster.AddTran();
- }
-
- protected internal override bool IsWrite()
- {
- return true;
- }
-
- protected internal override Node GetNode()
- {
- return partition.GetNodeWrite(cluster);
- }
-
- protected override Latency.LatencyType GetLatencyType()
- {
- return Latency.LatencyType.WRITE;
- }
-
- protected internal override void WriteBuffer()
- {
- SetTouch(writePolicy, key);
- }
-
- protected internal override void ParseResult(Connection conn)
- {
- ParseHeader(conn);
-
- if (resultCode == 0)
- {
- Touched = true;
- return;
- }
-
- Touched = false;
- if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR)
- {
- if (failOnNotFound)
- {
- throw new AerospikeException(resultCode);
- }
- return;
- }
-
- if (resultCode == ResultCode.FILTERED_OUT)
- {
- if (writePolicy.failOnFilteredOut)
- {
- throw new AerospikeException(resultCode);
- }
- return;
- }
-
- throw new AerospikeException(resultCode);
- }
-
- protected internal override bool PrepareRetry(bool timeout)
- {
- partition.PrepareRetryWrite(timeout);
- return true;
- }
- }
-}
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ public sealed class TouchCommand : SyncWriteCommand
+ {
+ private readonly bool failOnNotFound;
+ internal bool Touched { get; private set; }
+ public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key)
+ : base(cluster, writePolicy, key)
+ {
+ this.failOnNotFound = true;
+ }
+
+ public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key, bool failOnNotFound)
+ : base(cluster, writePolicy, key)
+ {
+ this.failOnNotFound = failOnNotFound;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetTouch(writePolicy, key);
+ }
+
+ protected internal override void ParseResult(Connection conn)
+ {
+ ParseHeader(conn);
+ ParseFields(policy.Txn, key, true);
+
+ if (resultCode == ResultCode.OK)
+ {
+ Touched = true;
+ return;
+ }
+
+ Touched = false;
+ if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR)
+ {
+ if (failOnNotFound)
+ {
+ throw new AerospikeException(resultCode);
+ }
+ return;
+ }
+
+ if (resultCode == ResultCode.FILTERED_OUT)
+ {
+ if (writePolicy.failOnFilteredOut)
+ {
+ throw new AerospikeException(resultCode);
+ }
+ return;
+ }
+
+ throw new AerospikeException(resultCode);
+ }
+ }
+}
diff --git a/AerospikeClient/Command/TxnAddKeys.cs b/AerospikeClient/Command/TxnAddKeys.cs
new file mode 100644
index 00000000..51d55c3f
--- /dev/null
+++ b/AerospikeClient/Command/TxnAddKeys.cs
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ public sealed class TxnAddKeys : SyncWriteCommand
+ {
+ private readonly OperateArgs args;
+ private readonly Txn txn;
+
+ public TxnAddKeys (Cluster cluster, Key key, OperateArgs args, Txn txn)
+ : base(cluster, args.writePolicy, key)
+ {
+ this.args = args;
+ this.txn = txn;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetTxnAddKeys(args.writePolicy, key, args);
+ }
+
+ protected internal override void ParseResult(Connection conn)
+ {
+ ParseHeader(conn);
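+ // The server returns the MRT deadline when keys are added to the monitor record; capture it on the client-side Txn.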
+ ParseTxnDeadline(txn);
+
+ if (resultCode == ResultCode.OK)
+ {
+ return;
+ }
+
+ throw new AerospikeException(resultCode);
+ }
+ }
+}
diff --git a/AerospikeClient/Command/TxnClose.cs b/AerospikeClient/Command/TxnClose.cs
new file mode 100644
index 00000000..73df0db0
--- /dev/null
+++ b/AerospikeClient/Command/TxnClose.cs
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ public sealed class TxnClose : SyncWriteCommand
+ {
+ private readonly Txn txn;
+
+ public TxnClose(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key)
+ : base(cluster, writePolicy, key)
+ {
+ this.txn = txn;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetTxnClose(txn, key);
+ }
+
+ protected internal override void ParseResult(Connection conn)
+ {
+ ParseHeader(conn);
+ ParseFields(policy.Txn, key, true);
+
+ if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR)
+ {
+ return;
+ }
+
+ throw new AerospikeException(resultCode);
+ }
+
+ protected internal override void OnInDoubt()
+ {
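+ // Intentionally empty: failure to close the monitor record does not put the transaction in doubt.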
+ }
+ }
+}
diff --git a/AerospikeClient/Command/TxnMarkRollForward.cs b/AerospikeClient/Command/TxnMarkRollForward.cs
new file mode 100644
index 00000000..a59b86a6
--- /dev/null
+++ b/AerospikeClient/Command/TxnMarkRollForward.cs
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ public sealed class TxnMarkRollForward : SyncWriteCommand
+ {
+ public TxnMarkRollForward(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key)
+ : base(cluster, writePolicy, key)
+ {
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetTxnMarkRollForward(key);
+ }
+
+ protected internal override void ParseResult(Connection conn)
+ {
+ ParseHeader(conn);
+ ParseFields(policy.Txn, key, true);
+
+ // MRT_COMMITTED is considered a success because it means a previous attempt already
+ // succeeded in notifying the server that the MRT will be rolled forward.
+ if (resultCode == ResultCode.OK || resultCode == ResultCode.MRT_COMMITTED)
+ {
+ return;
+ }
+
+ throw new AerospikeException(resultCode);
+ }
+
+ protected internal override void OnInDoubt()
+ {
+ }
+ }
+}
diff --git a/AerospikeClient/Command/TxnMonitor.cs b/AerospikeClient/Command/TxnMonitor.cs
new file mode 100644
index 00000000..b0826e3b
--- /dev/null
+++ b/AerospikeClient/Command/TxnMonitor.cs
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ public sealed class TxnMonitor
+ {
+ private static readonly ListPolicy OrderedListPolicy = new(ListOrder.ORDERED,
+ ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL);
+
+ private static readonly string BinNameId = "id";
+ private static readonly string BinNameDigests = "keyds";
+
+ public static void AddKey(Cluster cluster, WritePolicy policy, Key cmdKey)
+ {
+ Txn txn = policy.Txn;
+
+ if (txn.Writes.Contains(cmdKey))
+ {
+ // Transaction monitor already contains this key.
+ return;
+ }
+
+ Operation[] ops = GetTxnOps(txn, cmdKey);
+ AddWriteKeys(cluster, policy, ops);
+ }
+
+ public static void AddKeys(Cluster cluster, BatchPolicy policy, Key[] keys)
+ {
+ Operation[] ops = GetTxnOps(policy.Txn, keys);
+ AddWriteKeys(cluster, policy, ops);
+ }
+
+ public static void AddKeys(Cluster cluster, BatchPolicy policy, List<BatchRecord> records)
+ {
+ Operation[] ops = GetTxnOps(policy.Txn, records);
+
+ if (ops != null)
+ {
+ AddWriteKeys(cluster, policy, ops);
+ }
+ }
+
+ public static Operation[] GetTxnOps(Txn txn, Key cmdKey)
+ {
+ txn.VerifyCommand();
+ txn.SetNamespace(cmdKey.ns);
+
+ if (txn.MonitorExists())
+ {
+ // Existing monitor record; just append the key digest.
+ return new Operation[] {
+ ListOperation.Append(OrderedListPolicy, BinNameDigests, Value.Get(cmdKey.digest))
+ };
+ }
+ else
+ {
+ // No existing monitor record. The first write must also set the transaction id.
+ return new Operation[] {
+ Operation.Put(new Bin(BinNameId, txn.Id)),
+ ListOperation.Append(OrderedListPolicy, BinNameDigests, Value.Get(cmdKey.digest))
+ };
+ }
+ }
+
+ public static Operation[] GetTxnOps(Txn txn, Key[] keys)
+ {
+ txn.VerifyCommand();
+
+ List<Value> list = new(keys.Length);
+
+ foreach (Key key in keys)
+ {
+ txn.SetNamespace(key.ns);
+ list.Add(Value.Get(key.digest));
+ }
+ return GetTxnOps(txn, list);
+ }
+
+ public static Operation[] GetTxnOps(Txn txn, List<BatchRecord> records)
+ {
+ txn.VerifyCommand();
+
+ List<Value> list = new(records.Count);
+
+ foreach (BatchRecord br in records)
+ {
+ txn.SetNamespace(br.key.ns);
+
+ if (br.hasWrite)
+ {
+ list.Add(Value.Get(br.key.digest));
+ }
+ }
+
+ if (list.Count == 0)
+ {
+ // Readonly batch does not need to add key digests.
+ return null;
+ }
+ return GetTxnOps(txn, list);
+ }
+
+ private static Operation[] GetTxnOps(Txn txn, List<Value> list)
+ {
+ if (txn.MonitorExists())
+ {
+ // Existing monitor record; just append the key digests.
+ return new Operation[] {
+ ListOperation.AppendItems(OrderedListPolicy, BinNameDigests, list)
+ };
+ }
+ else
+ {
+ // No existing monitor record. The first write must also set the transaction id.
+ return new Operation[] {
+ Operation.Put(new Bin(BinNameId, txn.Id)),
+ ListOperation.AppendItems(OrderedListPolicy, BinNameDigests, list)
+ };
+ }
+ }
+
+ private static void AddWriteKeys(Cluster cluster, Policy policy, Operation[] ops)
+ {
+ Txn txn = policy.Txn;
+ Key txnKey = GetTxnMonitorKey(policy.Txn);
+ WritePolicy wp = CopyTimeoutPolicy(policy);
+ OperateArgs args = new(wp, null, null, ops);
+ TxnAddKeys cmd = new(cluster, txnKey, args, txn);
+ cmd.Execute();
+ }
+
+ public static Key GetTxnMonitorKey(Txn txn)
+ {
+ return new Key(txn.Ns, "<ERO~MRT", txn.Id);
+ }
+ }
+}
diff --git a/AerospikeClient/Command/TxnRoll.cs b/AerospikeClient/Command/TxnRoll.cs
new file mode 100644
--- /dev/null
+++ b/AerospikeClient/Command/TxnRoll.cs
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ public sealed class TxnRoll
+ {
+ private readonly Cluster cluster;
+ private readonly Txn txn;
+ private BatchRecord[] verifyRecords;
+ private BatchRecord[] rollRecords;
+
+ public TxnRoll(Cluster cluster, Txn txn)
+ {
+ this.cluster = cluster;
+ this.txn = txn;
+ }
+
+ private void Verify(BatchPolicy verifyPolicy)
+ {
+ // Validate record versions in a batch.
+ BatchRecord[] records = null;
+ Key[] keys = null;
+ long?[] versions = null;
+
+ bool actionPerformed = txn.Reads.PerformActionOnEachElement(max =>
+ {
+ if (max == 0) return false;
+
+ records = new BatchRecord[max];
+ keys = new Key[max];
+ versions = new long?[max];
+ return true;
+ },
+ (key, value, count) =>
+ {
+ keys[count] = key;
+ records[count] = new BatchRecord(key, false);
+ versions[count] = value;
+ });
+
+ if (!actionPerformed) // If no action was performed, there are no elements. Return.
+ {
+ return;
+ }
+
+ this.verifyRecords = records;
+
+ BatchStatus status = new(true);
+ List<BatchNode> bns = BatchNode.GenerateList(cluster, verifyPolicy, keys, records, false, status);
+ BatchCommand[] commands = new BatchCommand[bns.Count];
+
+ int count = 0;
+
+ foreach (BatchNode bn in bns)
+ {
+ commands[count++] = new BatchTxnVerify(
+ cluster, bn, verifyPolicy, keys, versions, records, status);
+ }
+
+ BatchExecutor.Execute(cluster, verifyPolicy, commands, status);
+
+ if (!status.GetStatus())
+ {
+ throw new AerospikeException("Failed to verify one or more record versions");
+ }
+ }
+
+ private void MarkRollForward(WritePolicy writePolicy, Key txnKey)
+ {
+ // Tell MRT monitor that a roll-forward will commence.
+ TxnMarkRollForward cmd = new(cluster, txn, writePolicy, txnKey);
+ cmd.Execute();
+ }
+
+ private void Roll(BatchPolicy rollPolicy, int txnAttr)
+ {
+ BatchRecord[] records = null;
+ Key[] keys = null;
+
+ bool actionPerformed = txn.Writes.PerformActionOnEachElement(max =>
+ {
+ if (max == 0) return false;
+
+ records = new BatchRecord[max];
+ keys = new Key[max];
+ return true;
+ },
+ (item, count) =>
+ {
+ keys[count] = item;
+ records[count] = new BatchRecord(item, true);
+ });
+
+ if (!actionPerformed)
+ {
+ return;
+ }
+
+ this.rollRecords = records;
+
+ BatchAttr attr = new();
+ attr.SetTxn(txnAttr);
+ BatchStatus status = new(true);
+
+ // GenerateList() requires a null transaction instance.
+ List<BatchNode> bns = BatchNode.GenerateList(cluster, rollPolicy, keys, records, true, status);
+ BatchCommand[] commands = new BatchCommand[bns.Count];
+
+ int count = 0;
+
+ foreach (BatchNode bn in bns)
+ {
+ commands[count++] = new BatchTxnRoll(
+ cluster, bn, rollPolicy, txn, keys, records, attr, status);
+ }
+ BatchExecutor.Execute(cluster, rollPolicy, commands, status);
+
+ if (!status.GetStatus())
+ {
+ string rollString = txnAttr == Command.INFO4_MRT_ROLL_FORWARD ? "commit" : "abort";
+ throw new AerospikeException("Failed to " + rollString + " one or more records");
+ }
+ }
+
+ private void Close(WritePolicy writePolicy, Key txnKey)
+ {
+ // Delete MRT monitor on server.
+ TxnClose cmd = new(cluster, txn, writePolicy, txnKey);
+ cmd.Execute();
+
+ // Reset MRT on client.
+ txn.Clear();
+ }
+ }
+}
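
Taken together, the methods above imply the commit sequence: verify that every read version is unchanged, durably mark the monitor record for roll-forward, roll each written record forward, then delete the monitor record. A hedged sketch of that flow; the public entry point name and signature are assumptions, since only the private methods appear in this diff:

    public void CommitSketch(BatchPolicy verifyPolicy, BatchPolicy rollPolicy, WritePolicy writePolicy)
    {
        Key txnKey = TxnMonitor.GetTxnMonitorKey(txn);
        Verify(verifyPolicy);                             // throws if any read version changed
        MarkRollForward(writePolicy, txnKey);             // commit decision is now durable on the server
        Roll(rollPolicy, Command.INFO4_MRT_ROLL_FORWARD); // roll forward all written records
        Close(writePolicy, txnKey);                       // delete monitor record and reset client state
    }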
diff --git a/AerospikeClient/Command/WriteCommand.cs b/AerospikeClient/Command/WriteCommand.cs
index 018fc94d..5f275729 100644
--- a/AerospikeClient/Command/WriteCommand.cs
+++ b/AerospikeClient/Command/WriteCommand.cs
@@ -1,86 +1,59 @@
-/*
- * Copyright 2012-2024 Aerospike, Inc.
- *
- * Portions may be licensed to Aerospike, Inc. under one or more contributor
- * license agreements.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-namespace Aerospike.Client
-{
- public sealed class WriteCommand : SyncCommand
- {
- private readonly WritePolicy writePolicy;
- private readonly Key key;
- private readonly Partition partition;
- private readonly Bin[] bins;
- private readonly Operation.Type operation;
-
- public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bins, Operation.Type operation)
- : base(cluster, writePolicy)
- {
- this.writePolicy = writePolicy;
- this.key = key;
- this.partition = Partition.Write(cluster, writePolicy, key);
- this.bins = bins;
- this.operation = operation;
- cluster.AddTran();
- }
-
- protected internal override bool IsWrite()
- {
- return true;
- }
-
- protected internal override Node GetNode()
- {
- return partition.GetNodeWrite(cluster);
- }
-
- protected override Latency.LatencyType GetLatencyType()
- {
- return Latency.LatencyType.WRITE;
- }
-
- protected internal override void WriteBuffer()
- {
- SetWrite(writePolicy, operation, key, bins);
- }
-
- protected internal override void ParseResult(Connection conn)
- {
- ParseHeader(conn);
-
- if (resultCode == 0)
- {
- return;
- }
-
- if (resultCode == ResultCode.FILTERED_OUT)
- {
- if (writePolicy.failOnFilteredOut)
- {
- throw new AerospikeException(resultCode);
- }
- return;
- }
-
- throw new AerospikeException(resultCode);
- }
-
- protected internal override bool PrepareRetry(bool timeout)
- {
- partition.PrepareRetryWrite(timeout);
- return true;
- }
- }
-}
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ public sealed class WriteCommand : SyncWriteCommand
+ {
+ private readonly Bin[] bins;
+ private readonly Operation.Type operation;
+
+ public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bins, Operation.Type operation)
+ : base(cluster, writePolicy, key)
+ {
+ this.bins = bins;
+ this.operation = operation;
+ }
+
+ protected internal override void WriteBuffer()
+ {
+ SetWrite(writePolicy, operation, key, bins);
+ }
+
+ protected internal override void ParseResult(Connection conn)
+ {
+ ParseHeader(conn);
+ ParseFields(policy.Txn, key, true);
+
+ if (resultCode == ResultCode.OK)
+ {
+ return;
+ }
+
+ if (resultCode == ResultCode.FILTERED_OUT)
+ {
+ if (writePolicy.failOnFilteredOut)
+ {
+ throw new AerospikeException(resultCode);
+ }
+ return;
+ }
+
+ throw new AerospikeException(resultCode);
+ }
+ }
+}
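
With the shared key/partition plumbing moved into SyncWriteCommand, a single-record write joins a multi-record transaction simply by carrying a Txn on its policy; ParseFields above then records the returned version. A usage sketch, given an AerospikeClient client, and assuming the parameterless Txn constructor and a client-level commit entry point from elsewhere in this PR:

    Txn txn = new Txn();
    WritePolicy wp = new WritePolicy(client.WritePolicyDefault);
    wp.Txn = txn;
    client.Put(wp, new Key("test", "demo", "k1"), new Bin("bin1", 10));
    // ...more reads/writes with the same Txn, then commit or abort the transaction.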
diff --git a/AerospikeClient/Exp/Exp.cs b/AerospikeClient/Exp/Exp.cs
index 692077b6..dd3e0de9 100644
--- a/AerospikeClient/Exp/Exp.cs
+++ b/AerospikeClient/Exp/Exp.cs
@@ -408,12 +408,12 @@ public static Exp TTL()
}
/// <summary>
- /// Create expression that returns if record has been deleted and is still in tombstone state.
+ /// Create expression that returns if record has been deleted and is still in tombstone State.
/// This expression usually evaluates quickly because record meta data is cached in memory.
/// </summary>
/// <example>
/// <code>
- /// // Deleted records that are in tombstone state.
+ /// // Deleted records that are in tombstone State.
/// Exp.isTombstone()
/// </code>
/// </example>
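
For reference, the tombstone expression is normally used as a policy filter expression. An illustrative snippet, not part of this diff, assuming the C# names Exp.Build, Exp.IsTombstone, and the filterExp policy field:

    Policy policy = new Policy();
    // Match only deleted records that are still in tombstone state.
    policy.filterExp = Exp.Build(Exp.IsTombstone());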
diff --git a/AerospikeClient/Listener/AbortListener.cs b/AerospikeClient/Listener/AbortListener.cs
new file mode 100644
index 00000000..b5bb878b
--- /dev/null
+++ b/AerospikeClient/Listener/AbortListener.cs
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+using static Aerospike.Client.AbortStatus;
+
+namespace Aerospike.Client
+{
+ /// <summary>
+ /// Asynchronous result notifications for multi-record transaction (MRT) aborts.
+ /// </summary>
+ public interface AbortListener
+ {
+ /// <summary>
+ /// This method is called when the abort succeeded or will succeed.
+ /// </summary>
+ void OnSuccess(AbortStatusType status);
+ }
+}
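
A listener implementation only needs the single success callback. For example, a minimal logging listener (illustrative only, assuming the same using static Aerospike.Client.AbortStatus import as above):

    public sealed class LoggingAbortListener : AbortListener
    {
        public void OnSuccess(AbortStatusType status)
        {
            // Called once the abort has succeeded or is guaranteed to succeed.
            Console.WriteLine("abort finished: " + status);
        }
    }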
diff --git a/AerospikeClient/Listener/CommitListener.cs b/AerospikeClient/Listener/CommitListener.cs
new file mode 100644
index 00000000..2629c7de
--- /dev/null
+++ b/AerospikeClient/Listener/CommitListener.cs
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+using static Aerospike.Client.CommitStatus;
+
+namespace Aerospike.Client
+{
+ /// <summary>
+ /// Asynchronous result notifications for multi-record transaction (MRT) commits.
+ /// </summary>
+ public interface CommitListener
+ {
+ /// <summary>
+ /// This method is called when the records are verified and the commit succeeded or will succeed.
+ /// </summary>
+ void OnSuccess(CommitStatusType status);
+
+ /// <summary>
+ /// This method is called when the commit fails.
+ /// </summary>
+ /// <param name="exception">error that occurred</param>
+ void OnFailure(AerospikeException.Commit exception);
+ }
+}
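
Unlike aborts, commits can fail after verification, so this interface adds a failure callback carrying AerospikeException.Commit. A minimal implementation (illustrative only):

    public sealed class LoggingCommitListener : CommitListener
    {
        public void OnSuccess(CommitStatusType status)
        {
            Console.WriteLine("commit finished: " + status);
        }

        public void OnFailure(AerospikeException.Commit exception)
        {
            Console.WriteLine("commit failed: " + exception.Message);
        }
    }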
diff --git a/AerospikeClient/Main/AbortStatus.cs b/AerospikeClient/Main/AbortStatus.cs
new file mode 100644
index 00000000..94f2dee2
--- /dev/null
+++ b/AerospikeClient/Main/AbortStatus.cs
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+namespace Aerospike.Client
+{
+ /// <summary>
+ /// Multi-record transaction (MRT) abort error status code.
+ /// </summary>
+ public static class AbortStatus
+ {
+ public enum AbortStatusType
+ {
+ OK,
+ ALREADY_COMMITTED,
+ ALREADY_ABORTED,
+ ROLL_BACK_ABANDONED,
+ CLOSE_ABANDONED
+ }
+
+ public static string AbortErrorToString(AbortStatusType status)
+ {
+ return status switch
+ {
+ AbortStatusType.OK => "Abort succeeded.",
+ AbortStatusType.ALREADY_COMMITTED => "Already committed.",
+ AbortStatusType.ALREADY_ABORTED => "Already aborted.",
+ AbortStatusType.ROLL_BACK_ABANDONED => "MRT client roll back abandoned. Server will eventually abort the MRT.",
+ AbortStatusType.CLOSE_ABANDONED => "MRT has been rolled back, but MRT client close was abandoned. Server will eventually close the MRT.",
+ _ => "Unexpected AbortStatusType."
+ };
+ }
+ }
+}
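
The switch above maps each status to a log-friendly message, so callers (such as the abort listener sketched earlier) can report a final outcome in one line:

    string msg = AbortStatus.AbortErrorToString(AbortStatus.AbortStatusType.ROLL_BACK_ABANDONED);
    // "MRT client roll back abandoned. Server will eventually abort the MRT."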
diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs
index 617369a4..a91de549 100644
--- a/AerospikeClient/Main/AerospikeClient.cs
+++ b/AerospikeClient/Main/AerospikeClient.cs
@@ -1,2469 +1,2705 @@
-/*
- * Copyright 2012-2024 Aerospike, Inc.
- *
- * Portions may be licensed to Aerospike, Inc. under one or more contributor
- * license agreements.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-using System.Reflection;
-using System.Text;
-
-namespace Aerospike.Client
-{
- ///
- /// Instantiate an AerospikeClient object to access an Aerospike
- /// database cluster and perform database operations.
- ///
- /// This client is thread-safe. One client instance should be used per cluster.
- /// Multiple threads should share this cluster instance.
- ///
- ///
- /// Your application uses this class API to perform database operations such as
- /// writing and reading records, and selecting sets of records. Write operations
- /// include specialized functionality such as append/prepend and arithmetic
- /// addition.
- ///
- ///
- /// Each record may have multiple bins, unless the Aerospike server nodes are
- /// configured as "single-bin". In "multi-bin" mode, partial records may be
- /// written or read by specifying the relevant subset of bins.
- ///
- ///
- public class AerospikeClient : IDisposable, IAerospikeClient
- {
- //-------------------------------------------------------
- // Member variables.
- //-------------------------------------------------------
-
- protected internal Cluster cluster;
-
- ///
- /// Default read policy that is used when read command policy is null.
- ///
- public Policy readPolicyDefault;
-
- ///
- /// Default write policy that is used when write command policy is null.
- ///
- public WritePolicy writePolicyDefault;
-
- ///
- /// Default scan policy that is used when scan command policy is null.
- ///
- public ScanPolicy scanPolicyDefault;
-
- ///
- /// Default query policy that is used when query command policy is null.
- ///
- public QueryPolicy queryPolicyDefault;
-
- ///
- /// Default parent policy used in batch read commands. Parent policy fields
- /// include socketTimeout, totalTimeout, maxRetries, etc...
- ///
- public BatchPolicy batchPolicyDefault;
-
- ///
- /// Default parent policy used in batch write commands. Parent policy fields
- /// include socketTimeout, totalTimeout, maxRetries, etc...
- ///
- public BatchPolicy batchParentPolicyWriteDefault;
-
- ///
- /// Default write policy used in batch operate commands.
- /// Write policy fields include generation, expiration, durableDelete, etc...
- ///
- public BatchWritePolicy batchWritePolicyDefault;
-
- ///
- /// Default delete policy used in batch delete commands.
- ///
- public BatchDeletePolicy batchDeletePolicyDefault;
-
- ///
- /// Default user defined function policy used in batch UDF execute commands.
- ///
- public BatchUDFPolicy batchUDFPolicyDefault;
-
- ///
- /// Default info policy that is used when info command policy is null.
- ///
- public InfoPolicy infoPolicyDefault;
-
- protected WritePolicy operatePolicyReadDefault;
-
- //-------------------------------------------------------
- // Constructors
- //-------------------------------------------------------
-
- ///
- /// Initialize Aerospike client.
- /// If the host connection succeeds, the client will:
- ///
- /// - Add host to the cluster map
- /// - Request host's list of other nodes in cluster
- /// - Add these nodes to cluster map
- ///
- ///
- /// If the connection succeeds, the client is ready to process database requests.
- /// If the connection fails, the cluster will remain in a disconnected state
- /// until the server is activated.
- ///
- ///
- /// host name
- /// host port
- /// if host connection fails
- public AerospikeClient(string hostname, int port)
- : this(new ClientPolicy(), new Host(hostname, port))
- {
- }
-
- ///
- /// Initialize Aerospike client.
- /// The client policy is used to set defaults and size internal data structures.
- /// If the host connection succeeds, the client will:
- ///
- /// - Add host to the cluster map
- /// - Request host's list of other nodes in cluster
- /// - Add these nodes to cluster map
- ///
- ///
- /// If the connection succeeds, the client is ready to process database requests.
- /// If the connection fails and the policy's failOnInvalidHosts is true, a connection
- /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state
- /// until the server is activated.
- ///
- ///
- /// client configuration parameters, pass in null for defaults
- /// host name
- /// host port
- /// if host connection fails
- public AerospikeClient(ClientPolicy policy, string hostname, int port)
- : this(policy, new Host(hostname, port))
- {
- }
-
- ///
- /// Initialize Aerospike client with suitable hosts to seed the cluster map.
- /// The client policy is used to set defaults and size internal data structures.
- /// For the first host connection that succeeds, the client will:
- ///
- /// - Add host to the cluster map
- /// - Request host's list of other nodes in cluster
- /// - Add these nodes to cluster map
- ///
- ///
- /// In most cases, only one host is necessary to seed the cluster. The remaining hosts
- /// are added as future seeds in case of a complete network failure.
- ///
- ///
- /// If one connection succeeds, the client is ready to process database requests.
- /// If all connections fail and the policy's failIfNotConnected is true, a connection
- /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state
- /// until the server is activated.
- ///
- ///
- /// client configuration parameters, pass in null for defaults
- /// array of potential hosts to seed the cluster
- /// if all host connections fail
- public AerospikeClient(ClientPolicy policy, params Host[] hosts)
- {
- if (policy == null)
- {
- policy = new ClientPolicy();
- }
- this.readPolicyDefault = policy.readPolicyDefault;
- this.writePolicyDefault = policy.writePolicyDefault;
- this.scanPolicyDefault = policy.scanPolicyDefault;
- this.queryPolicyDefault = policy.queryPolicyDefault;
- this.batchPolicyDefault = policy.batchPolicyDefault;
- this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault;
- this.batchWritePolicyDefault = policy.batchWritePolicyDefault;
- this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault;
- this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault;
- this.infoPolicyDefault = policy.infoPolicyDefault;
- this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault);
-
- cluster = new Cluster(policy, hosts);
- cluster.InitTendThread(policy.failIfNotConnected);
- }
-
- ///
- /// Construct client without initialization.
- /// Should only be used by classes inheriting from this client.
- ///
- protected internal AerospikeClient(ClientPolicy policy)
- {
- if (policy != null)
- {
- this.readPolicyDefault = policy.readPolicyDefault;
- this.writePolicyDefault = policy.writePolicyDefault;
- this.scanPolicyDefault = policy.scanPolicyDefault;
- this.queryPolicyDefault = policy.queryPolicyDefault;
- this.batchPolicyDefault = policy.batchPolicyDefault;
- this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault;
- this.batchWritePolicyDefault = policy.batchWritePolicyDefault;
- this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault;
- this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault;
- this.infoPolicyDefault = policy.infoPolicyDefault;
- }
- else
- {
- this.readPolicyDefault = new Policy();
- this.writePolicyDefault = new WritePolicy();
- this.scanPolicyDefault = new ScanPolicy();
- this.queryPolicyDefault = new QueryPolicy();
- this.batchPolicyDefault = BatchPolicy.ReadDefault();
- this.batchParentPolicyWriteDefault = BatchPolicy.WriteDefault();
- this.batchWritePolicyDefault = new BatchWritePolicy();
- this.batchDeletePolicyDefault = new BatchDeletePolicy();
- this.batchUDFPolicyDefault = new BatchUDFPolicy();
- this.infoPolicyDefault = new InfoPolicy();
- }
- this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault);
- }
-
- //-------------------------------------------------------
- // Operations policies
- //-------------------------------------------------------
-
- ///
- /// Default read policy that is used when read command policy is null.
- ///
- public Policy ReadPolicyDefault
- {
- get { return readPolicyDefault; }
- set { readPolicyDefault = value; }
- }
-
- ///
- /// Default write policy that is used when write command policy is null.
- ///
- public WritePolicy WritePolicyDefault
- {
- get { return writePolicyDefault; }
- set { writePolicyDefault = value; }
- }
-
- ///
- /// Default scan policy that is used when scan command policy is null.
- ///
- public ScanPolicy ScanPolicyDefault
- {
- get { return scanPolicyDefault; }
- set { scanPolicyDefault = value; }
- }
-
- ///
- /// Default query policy that is used when query command policy is null.
- ///
- public QueryPolicy QueryPolicyDefault
- {
- get { return queryPolicyDefault; }
- set { queryPolicyDefault = value; }
- }
-
- ///
- /// Default parent policy used in batch read commands. Parent policy fields
- /// include socketTimeout, totalTimeout, maxRetries, etc...
- ///
- public BatchPolicy BatchPolicyDefault
- {
- get { return batchPolicyDefault; }
- set { batchPolicyDefault = value; }
- }
-
- ///
- /// Default parent policy used in batch write commands. Parent policy fields
- /// include socketTimeout, totalTimeout, maxRetries, etc...
- ///
- public BatchPolicy BatchParentPolicyWriteDefault
- {
- get { return batchParentPolicyWriteDefault; }
- set { batchParentPolicyWriteDefault = value; }
- }
-
- ///
- /// Default write policy used in batch operate commands.
- /// Write policy fields include generation, expiration, durableDelete, etc...
- ///
- public BatchWritePolicy BatchWritePolicyDefault
- {
- get { return batchWritePolicyDefault; }
- set { batchWritePolicyDefault = value; }
- }
-
- ///
- /// Default delete policy used in batch delete commands.
- ///
- public BatchDeletePolicy BatchDeletePolicyDefault
- {
- get { return batchDeletePolicyDefault; }
- set { batchDeletePolicyDefault = value; }
- }
-
- ///
- /// Default user defined function policy used in batch UDF execute commands.
- ///
- public BatchUDFPolicy BatchUDFPolicyDefault
- {
- get { return batchUDFPolicyDefault; }
- set { batchUDFPolicyDefault = value; }
- }
-
- ///
- /// Default info policy that is used when info command policy is null.
- ///
- public InfoPolicy InfoPolicyDefault
- {
- get { return infoPolicyDefault; }
- set { infoPolicyDefault = value; }
- }
-
- //-------------------------------------------------------
- // Cluster Connection Management
- //-------------------------------------------------------
-
- public bool Disposed { get; private set; }
- private void Dispose(bool disposing)
- {
- if (!Disposed)
- {
- if (disposing)
- {
- this.Close();
- }
-
- Disposed = true;
- }
- }
-
- ///
- /// Close all client connections to database server nodes.
- ///
- public void Dispose()
- {
- // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
- Dispose(disposing: true);
- GC.SuppressFinalize(this);
- }
-
- ///
- /// Close all client connections to database server nodes.
- ///
- public void Close()
- {
- cluster.Close();
- }
-
- ///
- /// Return if we are ready to talk to the database server cluster.
- ///
- public bool Connected
- {
- get
- {
- return cluster.Connected;
- }
- }
-
- ///
- /// Cluster associated with this AerospikeClient instance.
- ///
- public Cluster Cluster
- {
- get
- {
- return cluster;
- }
- }
-
- ///
- /// Return array of active server nodes in the cluster.
- ///
- public Node[] Nodes
- {
- get
- {
- return cluster.Nodes;
- }
- }
-
- ///
- /// Enable extended periodic cluster and node latency metrics.
- ///
- public void EnableMetrics(MetricsPolicy metricsPolicy)
- {
- cluster.EnableMetrics(metricsPolicy);
- }
-
- ///
- /// Disable extended periodic cluster and node latency metrics.
- ///
- public void DisableMetrics()
- {
- cluster.DisableMetrics();
- }
-
- ///
- /// Return operating cluster statistics snapshot.
- ///
- public ClusterStats GetClusterStats()
- {
- return cluster.GetStats();
- }
-
- //-------------------------------------------------------
- // Write Record Operations
- //-------------------------------------------------------
-
- ///
- /// Write record bin(s).
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
- /// handled when the record already exists.
- ///
- /// write configuration parameters, pass in null for defaults
- /// unique record identifier
- /// array of bin name/value pairs
- /// if write fails
- public void Put(WritePolicy policy, Key key, params Bin[] bins)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.WRITE);
- command.Execute();
- }
-
- //-------------------------------------------------------
- // String Operations
- //-------------------------------------------------------
-
- ///
- /// Append bin string values to existing record bin values.
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
- /// handled when the record already exists.
- /// This call only works for string values.
- ///
- /// write configuration parameters, pass in null for defaults
- /// unique record identifier
- /// array of bin name/value pairs
- /// if append fails
- public void Append(WritePolicy policy, Key key, params Bin[] bins)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.APPEND);
- command.Execute();
- }
-
- ///
- /// Prepend bin string values to existing record bin values.
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
- /// handled when the record already exists.
- /// This call works only for string values.
- ///
- /// write configuration parameters, pass in null for defaults
- /// unique record identifier
- /// array of bin name/value pairs
- /// if prepend fails
- public void Prepend(WritePolicy policy, Key key, params Bin[] bins)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.PREPEND);
- command.Execute();
- }
-
- //-------------------------------------------------------
- // Arithmetic Operations
- //-------------------------------------------------------
-
- ///
- /// Add integer/double bin values to existing record bin values.
- /// The policy specifies the transaction timeout, record expiration and how the transaction is
- /// handled when the record already exists.
- ///
- /// write configuration parameters, pass in null for defaults
- /// unique record identifier
- /// array of bin name/value pairs
- /// if add fails
- public void Add(WritePolicy policy, Key key, params Bin[] bins)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.ADD);
- command.Execute();
- }
-
- //-------------------------------------------------------
- // Delete Operations
- //-------------------------------------------------------
-
- ///
- /// Delete record for specified key.
- /// Return whether record existed on server before deletion.
- /// The policy specifies the transaction timeout.
- ///
- /// delete configuration parameters, pass in null for defaults
- /// unique record identifier
- /// if delete fails
- public bool Delete(WritePolicy policy, Key key)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- DeleteCommand command = new DeleteCommand(cluster, policy, key);
- command.Execute();
- return command.Existed();
- }
-
- ///
- /// Delete records for specified keys. If a key is not found, the corresponding result
- /// will be <see cref="ResultCode.KEY_NOT_FOUND_ERROR"/>.
- ///
- /// Requires server version 6.0+
- ///
- ///
- /// batch configuration parameters, pass in null for defaults
- /// delete configuration parameters, pass in null for defaults
- /// array of unique record identifiers
- /// <see cref="BatchResults"/> which contains results for keys that did complete
- public BatchResults Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Key[] keys)
- {
- if (keys.Length == 0)
- {
- return new BatchResults(new BatchRecord[0], true);
- }
-
- if (batchPolicy == null)
- {
- batchPolicy = batchParentPolicyWriteDefault;
- }
-
- if (deletePolicy == null)
- {
- deletePolicy = batchDeletePolicyDefault;
- }
-
- BatchAttr attr = new BatchAttr();
- attr.SetDelete(deletePolicy);
-
- BatchRecord[] records = new BatchRecord[keys.Length];
-
- for (int i = 0; i < keys.Length; i++)
- {
- records[i] = new BatchRecord(keys[i], attr.hasWrite);
- }
-
- try
- {
- BatchStatus status = new BatchStatus(true);
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, null, records, attr, status);
- }
-
- BatchExecutor.Execute(cluster, batchPolicy, commands, status);
- return new BatchResults(records, status.GetStatus());
- }
- catch (Exception e)
- {
- // Batch terminated on fatal error.
- throw new AerospikeException.BatchRecordArray(records, e);
- }
- }
-
- ///
- /// Remove records in specified namespace/set efficiently. This method is many orders of magnitude
- /// faster than deleting records one at a time.
- ///
- /// See https://www.aerospike.com/docs/reference/info#truncate
- ///
- ///
- /// This asynchronous server call may return before the truncation is complete. The user can still
- /// write new records after the server returns because new records will have last update times
- /// greater than the truncate cutoff (set at the time of truncate call).
- ///
- ///
- /// info command configuration parameters, pass in null for defaults
- /// required namespace
- /// optional set name. Pass in null to delete all sets in namespace.
- ///
- /// optionally delete records before record last update time.
- /// If specified, value must be before the current time.
- /// Pass in null to delete all records in namespace/set regardless of last update time.
- ///
- public void Truncate(InfoPolicy policy, string ns, string set, DateTime? beforeLastUpdate)
- {
- if (policy == null)
- {
- policy = infoPolicyDefault;
- }
-
- // Send truncate command to one node. That node will distribute the command to other nodes.
- Node node = cluster.GetRandomNode();
-
- StringBuilder sb = new StringBuilder(200);
-
- if (set != null)
- {
- sb.Append("truncate:namespace=");
- sb.Append(ns);
- sb.Append(";set=");
- sb.Append(set);
- }
- else
- {
- sb.Append("truncate-namespace:namespace=");
- sb.Append(ns);
- }
-
- if (beforeLastUpdate.HasValue)
- {
- sb.Append(";lut=");
- // Convert to nanoseconds since unix epoch.
- sb.Append(Util.NanosFromEpoch(beforeLastUpdate.Value));
- }
-
- string response = Info.Request(policy, node, sb.ToString());
-
- if (!response.Equals("ok", StringComparison.CurrentCultureIgnoreCase))
- {
- throw new AerospikeException("Truncate failed: " + response);
- }
- }
-
- //-------------------------------------------------------
- // Touch Operations
- //-------------------------------------------------------
-
- ///
- /// Reset record's time to expiration using the policy's expiration.
- /// If the record does not exist, it can't be created because the server deletes empty records.
- /// Throw an exception if the record does not exist.
- ///
- /// write configuration parameters, pass in null for defaults
- /// unique record identifier
- /// if touch fails
- public void Touch(WritePolicy policy, Key key)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- TouchCommand command = new TouchCommand(cluster, policy, key);
- command.Execute();
- }
-
- ///
- /// Reset record's time to expiration using the policy's expiration.
- /// If the record does not exist, it can't be created because the server deletes empty records.
- /// Return true if the record exists and is touched. Return false if the record does not exist.
- ///
- /// write configuration parameters, pass in null for defaults
- /// unique record identifier
- /// true if record was touched, false otherwise
- /// if touch fails
- public bool Touched(WritePolicy policy, Key key)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- TouchCommand command = new(cluster, policy, key, false);
- command.Execute();
-
- return command.Touched;
- }
-
- //-------------------------------------------------------
- // Existence-Check Operations
- //-------------------------------------------------------
-
- ///
- /// Determine if a record key exists.
- /// Return whether record exists or not.
- /// The policy can be used to specify timeouts.
- ///
- /// generic configuration parameters, pass in null for defaults
- /// unique record identifier
- /// if command fails
- public bool Exists(Policy policy, Key key)
- {
- if (policy == null)
- {
- policy = readPolicyDefault;
- }
- ExistsCommand command = new ExistsCommand(cluster, policy, key);
- command.Execute();
- return command.Exists();
- }
-
- ///
- /// Check if multiple record keys exist in one batch call.
- /// The returned boolean array is in positional order with the original key array order.
- ///
- /// batch configuration parameters, pass in null for defaults
- /// array of unique record identifiers
- /// which contains results for keys that did complete
- public bool[] Exists(BatchPolicy policy, Key[] keys)
- {
- if (keys.Length == 0)
- {
- return new bool[0];
- }
-
- if (policy == null)
- {
- policy = batchPolicyDefault;
- }
-
-
- bool[] existsArray = new bool[keys.Length];
-
- try
- {
- BatchStatus status = new BatchStatus(false);
-
- if (policy.allowProleReads)
- {
- // Send all requests to a single random node.
- Node node = cluster.GetRandomNode();
- BatchNode batchNode = new BatchNode(node, keys);
- BatchCommand command = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status);
- BatchExecutor.Execute(command, status);
- return existsArray;
- }
-
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status);
- }
- BatchExecutor.Execute(cluster, policy, commands, status);
- return existsArray;
- }
- catch (Exception e)
- {
- throw new AerospikeException.BatchExists(existsArray, e);
- }
- }
-
- //-------------------------------------------------------
- // Read Record Operations
- //-------------------------------------------------------
-
- ///
- /// Read entire record for specified key.
- /// If found, return record instance. If not found, return null.
- /// The policy can be used to specify timeouts.
- ///
- /// generic configuration parameters, pass in null for defaults
- /// unique record identifier
- /// if read fails
- public Record Get(Policy policy, Key key)
- {
- if (policy == null)
- {
- policy = readPolicyDefault;
- }
- ReadCommand command = new ReadCommand(cluster, policy, key);
- command.Execute();
- return command.Record;
- }
-
- ///
- /// Read record header and bins for specified key.
- /// If found, return record instance. If not found, return null.
- /// The policy can be used to specify timeouts.
- ///
- /// generic configuration parameters, pass in null for defaults
- /// unique record identifier
- /// bins to retrieve
- /// if read fails
- public Record Get(Policy policy, Key key, params string[] binNames)
- {
- if (policy == null)
- {
- policy = readPolicyDefault;
- }
- ReadCommand command = new ReadCommand(cluster, policy, key, binNames);
- command.Execute();
- return command.Record;
- }
-
- ///
- /// Read record generation and expiration only for specified key. Bins are not read.
- /// If found, return record instance. If not found, return null.
- /// The policy can be used to specify timeouts.
- ///
- /// generic configuration parameters, pass in null for defaults
- /// unique record identifier
- /// if read fails
- public Record GetHeader(Policy policy, Key key)
- {
- if (policy == null)
- {
- policy = readPolicyDefault;
- }
- ReadHeaderCommand command = new ReadHeaderCommand(cluster, policy, key);
- command.Execute();
- return command.Record;
- }
-
- //-------------------------------------------------------
- // Batch Read Operations
- //-------------------------------------------------------
-
- ///
- /// Read multiple records for specified batch keys in one batch call.
- /// This method allows different namespaces/bins to be requested for each key in the batch.
- /// The returned records are located in the same list.
- /// If the BatchRead key field is not found, the corresponding record field will be null.
- ///
- /// batch configuration parameters, pass in null for defaults
- /// list of unique record identifiers and the bins to retrieve.
- /// The returned records are located in the same list.
- /// true if all batch key requests succeeded
- /// if read fails
- public bool Get(BatchPolicy policy, List<BatchRead> records)
- {
- if (records.Count == 0)
- {
- return true;
- }
-
- if (policy == null)
- {
- policy = batchPolicyDefault;
- }
-
- BatchStatus status = new BatchStatus(true);
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchReadListCommand(cluster, batchNode, policy, records, status);
- }
- BatchExecutor.Execute(cluster, policy, commands, status);
- return status.GetStatus();
- }
-
- ///
- /// Read multiple records for specified keys in one batch call.
- /// The returned records are in positional order with the original key array order.
- /// If a key is not found, the positional record will be null.
- ///
- /// batch configuration parameters, pass in null for defaults
- /// array of unique record identifiers
- /// which contains results for keys that did complete
- public Record[] Get(BatchPolicy policy, Key[] keys)
- {
- if (keys.Length == 0)
- {
- return new Record[0];
- }
-
- if (policy == null)
- {
- policy = batchPolicyDefault;
- }
-
- Record[] records = new Record[keys.Length];
-
- try
- {
- BatchStatus status = new BatchStatus(false);
-
- if (policy.allowProleReads)
- {
- // Send all requests to a single random node.
- Node node = cluster.GetRandomNode();
- BatchNode batchNode = new BatchNode(node, keys);
- BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_GET_ALL, false, status);
- BatchExecutor.Execute(command, status);
- return records;
- }
-
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_GET_ALL, false, status);
- }
- BatchExecutor.Execute(cluster, policy, commands, status);
- return records;
- }
- catch (Exception e)
- {
- throw new AerospikeException.BatchRecords(records, e);
- }
- }
-
- ///
- /// Read multiple record headers and bins for specified keys in one batch call.
- /// The returned records are in positional order with the original key array order.
- /// If a key is not found, the positional record will be null.
- ///
- /// batch configuration parameters, pass in null for defaults
- /// array of unique record identifiers
- /// array of bins to retrieve
- /// which contains results for keys that did complete
- public Record[] Get(BatchPolicy policy, Key[] keys, params string[] binNames)
- {
- if (keys.Length == 0)
- {
- return new Record[0];
- }
-
- if (policy == null)
- {
- policy = batchPolicyDefault;
- }
-
- Record[] records = new Record[keys.Length];
-
- try
- {
- BatchStatus status = new BatchStatus(false);
-
- if (policy.allowProleReads)
- {
- // Send all requests to a single random node.
- Node node = cluster.GetRandomNode();
- BatchNode batchNode = new BatchNode(node, keys);
- BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, null, records, Command.INFO1_READ, false, status);
- BatchExecutor.Execute(command, status);
- return records;
- }
-
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, null, records, Command.INFO1_READ, false, status);
- }
- BatchExecutor.Execute(cluster, policy, commands, status);
- return records;
- }
- catch (Exception e)
- {
- throw new AerospikeException.BatchRecords(records, e);
- }
- }
-
- ///
- /// Read multiple records for specified keys using read operations in one batch call.
- /// The returned records are in positional order with the original key array order.
- /// If a key is not found, the positional record will be null.
- ///
- /// batch configuration parameters, pass in null for defaults
- /// array of unique record identifiers
- /// array of read operations on record
- /// which contains results for keys that did complete
- public Record[] Get(BatchPolicy policy, Key[] keys, params Operation[] ops)
- {
- if (keys.Length == 0)
- {
- return new Record[0];
- }
-
- if (policy == null)
- {
- policy = batchPolicyDefault;
- }
-
- Record[] records = new Record[keys.Length];
-
- try
- {
- BatchStatus status = new BatchStatus(false);
-
- if (policy.allowProleReads)
- {
- // Send all requests to a single random node.
- Node node = cluster.GetRandomNode();
- BatchNode batchNode = new BatchNode(node, keys);
- BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, ops, records, Command.INFO1_READ, true, status);
- BatchExecutor.Execute(command, status);
- return records;
- }
-
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, ops, records, Command.INFO1_READ, true, status);
- }
- BatchExecutor.Execute(cluster, policy, commands, status);
- return records;
- }
- catch (Exception e)
- {
- throw new AerospikeException.BatchRecords(records, e);
- }
- }
-
- ///
- /// Read multiple record header data for specified keys in one batch call.
- /// The returned records are in positional order with the original key array order.
- /// If a key is not found, the positional record will be null.
- ///
- /// batch configuration parameters, pass in null for defaults
- /// array of unique record identifiers
- /// which contains results for keys that did complete
- public Record[] GetHeader(BatchPolicy policy, Key[] keys)
- {
- if (keys.Length == 0)
- {
- return new Record[0];
- }
-
- if (policy == null)
- {
- policy = batchPolicyDefault;
- }
-
- Record[] records = new Record[keys.Length];
-
- try
- {
- BatchStatus status = new BatchStatus(false);
-
- if (policy.allowProleReads)
- {
- // Send all requests to a single random node.
- Node node = cluster.GetRandomNode();
- BatchNode batchNode = new BatchNode(node, keys);
- BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_NOBINDATA, false, status);
- BatchExecutor.Execute(command, status);
- return records;
- }
-
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_NOBINDATA, false, status);
- }
- BatchExecutor.Execute(cluster, policy, commands, status);
- return records;
- }
- catch (Exception e)
- {
- throw new AerospikeException.BatchRecords(records, e);
- }
- }
-
- //-------------------------------------------------------
- // Join methods
- //-------------------------------------------------------
-
- ///
- /// Read specified bins in left record and then join with right records. Each join bin name
- /// (Join.leftKeysBinName) must exist in the left record. The join bin must contain a list of
- /// keys. Those keys are used to retrieve other records using a separate batch get.
- ///
- /// generic configuration parameters, pass in null for defaults
- /// unique main record identifier
- /// array of bins to retrieve
- /// array of join definitions
- /// if main read or join reads fail
- public Record Join(BatchPolicy policy, Key key, string[] binNames, params Join[] joins)
- {
- string[] names = new string[binNames.Length + joins.Length];
- int count = 0;
-
- foreach (string binName in binNames)
- {
- names[count++] = binName;
- }
-
- foreach (Join join in joins)
- {
- names[count++] = join.leftKeysBinName;
- }
- Record record = Get(policy, key, names);
- JoinRecords(policy, record, joins);
- return record;
- }
-
- ///
- /// Read all bins in left record and then join with right records. Each join bin name
- /// (Join.binNameKeys) must exist in the left record. The join bin must contain a list of
- /// keys. Those keys are used to retrieve other records using a separate batch get.
- ///
- /// generic configuration parameters, pass in null for defaults
- /// unique main record identifier
- /// array of join definitions
- /// if main read or join reads fail
- public Record Join(BatchPolicy policy, Key key, params Join[] joins)
- {
- Record record = Get(policy, key);
- JoinRecords(policy, record, joins);
- return record;
- }
-
- //-------------------------------------------------------
- // Generic Database Operations
- //-------------------------------------------------------
-
- ///
- /// Perform multiple read/write operations on a single key in one batch call.
- /// An example would be to add an integer value to an existing record and then
- /// read the result, all in one database call.
- ///
- /// The server executes operations in the same order as the operations array.
- /// Both scalar bin operations (Operation) and CDT bin operations (ListOperation,
- /// MapOperation) can be performed in same call.
- ///
- ///
- /// write configuration parameters, pass in null for defaults
- /// unique record identifier
- /// database operations to perform
- /// if command fails
- public Record Operate(WritePolicy policy, Key key, params Operation[] operations)
- {
- OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, key, operations);
- OperateCommand command = new OperateCommand(cluster, key, args);
- command.Execute();
- return command.Record;
- }
-
- //-------------------------------------------------------
- // Batch Read/Write Operations
- //-------------------------------------------------------
-
- ///
- /// Read/Write multiple records for specified batch keys in one batch call.
- /// This method allows different namespaces/bins for each key in the batch.
- /// The returned records are located in the same list.
- ///
- /// <see cref="BatchRecord"/> can be <see cref="BatchRead"/>, <see cref="BatchWrite"/>, <see cref="BatchDelete"/>, or
- /// <see cref="BatchUDF"/>.
- ///
- ///
- /// Requires server version 6.0+
- ///
- ///
- /// batch configuration parameters, pass in null for defaults
- /// list of unique record identifiers and read/write operations
- /// true if all batch sub-commands succeeded
- /// if command fails
- public bool Operate(BatchPolicy policy, List<BatchRecord> records)
- {
- if (records.Count == 0)
- {
- return true;
- }
-
- if (policy == null)
- {
- policy = batchParentPolicyWriteDefault;
- }
-
- BatchStatus status = new BatchStatus(true);
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchOperateListCommand(cluster, batchNode, policy, records, status);
- }
- BatchExecutor.Execute(cluster, policy, commands, status);
- return status.GetStatus();
- }
-
- ///
- /// Perform read/write operations on multiple keys. If a key is not found, the corresponding result
- /// will be <see cref="ResultCode.KEY_NOT_FOUND_ERROR"/>.
- ///
- /// Requires server version 6.0+
- ///
- ///
- /// batch configuration parameters, pass in null for defaults
- /// write configuration parameters, pass in null for defaults
- /// array of unique record identifiers
- ///
- /// read/write operations to perform. <see cref="Operation.Get()"/> is not allowed because it returns a
- /// variable number of bins and makes it difficult (sometimes impossible) to line up operations with
- /// results. Instead, use <see cref="Operation.Get(string)"/> for each bin name.
- ///
- /// <see cref="BatchResults"/> which contains results for keys that did complete
- public BatchResults Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, Key[] keys, params Operation[] ops)
- {
- if (keys.Length == 0)
- {
- return new BatchResults(new BatchRecord[0], true);
- }
-
- if (batchPolicy == null)
- {
- batchPolicy = batchParentPolicyWriteDefault;
- }
-
- if (writePolicy == null)
- {
- writePolicy = batchWritePolicyDefault;
- }
-
- BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops);
- BatchRecord[] records = new BatchRecord[keys.Length];
-
- for (int i = 0; i < keys.Length; i++)
- {
- records[i] = new BatchRecord(keys[i], attr.hasWrite);
- }
-
- try
- {
- BatchStatus status = new BatchStatus(true);
- List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status);
- }
-
- BatchExecutor.Execute(cluster, batchPolicy, commands, status);
- return new BatchResults(records, status.GetStatus());
- }
- catch (Exception e)
- {
- throw new AerospikeException.BatchRecordArray(records, e);
- }
- }
-
- //-------------------------------------------------------
- // Scan Operations
- //-------------------------------------------------------
-
- ///
- /// Read all records in specified namespace and set. If the policy's
- /// concurrentNodes is specified, each server node will be read in
- /// parallel. Otherwise, server nodes are read in series.
- ///
- /// This call will block until the scan is complete - callbacks are made
- /// within the scope of this call.
- ///
- ///
- /// scan configuration parameters, pass in null for defaults
- /// namespace - equivalent to database name
- /// optional set name - equivalent to database table
- /// read callback method - called with record data
- ///
- /// optional bin to retrieve. All bins will be returned if not specified.
- ///
- /// if scan fails
- public void ScanAll(ScanPolicy policy, string ns, string setName, ScanCallback callback, params string[] binNames)
- {
- if (policy == null)
- {
- policy = scanPolicyDefault;
- }
-
- Node[] nodes = cluster.ValidateNodes();
- PartitionTracker tracker = new PartitionTracker(policy, nodes);
- ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
- }
-
- ///
- /// Read all records in specified namespace and set for one node only.
- /// The node is specified by name.
- ///
- /// This call will block until the scan is complete - callbacks are made
- /// within the scope of this call.
- ///
- ///
- /// scan configuration parameters, pass in null for defaults
- /// server node name
- /// namespace - equivalent to database name
- /// optional set name - equivalent to database table
- /// read callback method - called with record data
- ///
- /// optional bin to retrieve. All bins will be returned if not specified.
- ///
- /// if scan fails
- public void ScanNode(ScanPolicy policy, string nodeName, string ns, string setName, ScanCallback callback, params string[] binNames)
- {
- Node node = cluster.GetNode(nodeName);
- ScanNode(policy, node, ns, setName, callback, binNames);
- }
-
- ///
- /// Read all records in specified namespace and set for one node only.
- ///
- /// This call will block until the scan is complete - callbacks are made
- /// within the scope of this call.
- ///
- ///
- /// scan configuration parameters, pass in null for defaults
- /// server node
- /// namespace - equivalent to database name
- /// optional set name - equivalent to database table
- /// read callback method - called with record data
- ///
- /// optional bin to retrieve. All bins will be returned if not specified.
- ///
- /// if scan fails
- public void ScanNode(ScanPolicy policy, Node node, string ns, string setName, ScanCallback callback, params string[] binNames)
- {
- if (policy == null)
- {
- policy = scanPolicyDefault;
- }
-
- PartitionTracker tracker = new PartitionTracker(policy, node);
- ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
- }
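-
-		// Editor's illustrative sketch (the node name is hypothetical): scan only the
-		// records owned by a single node, e.g. to spot-check data distribution.
-		private static void ScanNodeExample(AerospikeClient client)
-		{
-			client.ScanNode(null, "BB9020011AC4202", "test", "demo",
-				(key, record) => Console.WriteLine(key.ToString()));
-		}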
-
-		/// <summary>
-		/// Read records in specified namespace, set and partition filter.
-		/// <para>
-		/// This call will block until the scan is complete - callbacks are made
-		/// within the scope of this call.
-		/// </para>
-		/// </summary>
-		/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
-		/// <param name="partitionFilter">filter on a subset of data partitions</param>
-		/// <param name="ns">namespace - equivalent to database name</param>
-		/// <param name="setName">optional set name - equivalent to database table</param>
-		/// <param name="callback">read callback method - called with record data</param>
-		/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
-		/// <exception cref="AerospikeException">if scan fails</exception>
- public void ScanPartitions(ScanPolicy policy, PartitionFilter partitionFilter, string ns, string setName, ScanCallback callback, params string[] binNames)
- {
- if (policy == null)
- {
- policy = scanPolicyDefault;
- }
-
- Node[] nodes = cluster.ValidateNodes();
- PartitionTracker tracker = new PartitionTracker(policy, nodes, partitionFilter);
- ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
- }
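-
-		// Editor's illustrative sketch: scan only the first 1024 of the 4096 data
-		// partitions via a PartitionFilter. The namespace and set names are hypothetical.
-		private static void ScanPartitionsExample(AerospikeClient client)
-		{
-			PartitionFilter filter = PartitionFilter.Range(0, 1024);
-			client.ScanPartitions(null, filter, "test", "demo",
-				(key, record) => Console.WriteLine(key.ToString()));
-		}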
-
- //---------------------------------------------------------------
- // User defined functions
- //---------------------------------------------------------------
-
-		/// <summary>
-		/// Register package located in a file containing user defined functions with server.
-		/// This asynchronous server call will return before command is complete.
-		/// The user can optionally wait for command completion by using the returned
-		/// RegisterTask instance.
-		/// </summary>
-		/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
-		/// <param name="clientPath">path of client file containing user defined functions, relative to current directory</param>
-		/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
-		/// <param name="language">language of user defined functions</param>
-		/// <exception cref="AerospikeException">if register fails</exception>
- public RegisterTask Register(Policy policy, string clientPath, string serverPath, Language language)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- string content = Util.ReadFileEncodeBase64(clientPath);
- return RegisterCommand.Register(cluster, policy, content, serverPath, language);
- }
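-
-		// Editor's illustrative sketch (hypothetical file paths): register a Lua UDF
-		// package, then block until every node reports the registration complete by
-		// waiting on the returned RegisterTask.
-		private static void RegisterExample(AerospikeClient client)
-		{
-			RegisterTask task = client.Register(null, "udf/example.lua", "example.lua", Language.LUA);
-			task.Wait();
-		}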
-
-		/// <summary>
-		/// Register package located in a resource containing user defined functions with server.
-		/// This asynchronous server call will return before command is complete.
-		/// The user can optionally wait for command completion by using the returned
-		/// RegisterTask instance.
-		/// </summary>
-		/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
-		/// <param name="resourceAssembly">assembly where resource is located.  Current assembly can be obtained by: Assembly.GetExecutingAssembly()</param>
-		/// <param name="resourcePath">namespace path where Lua resource is located.  Example: Aerospike.Client.Resources.mypackage.lua</param>
-		/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
-		/// <param name="language">language of user defined functions</param>
-		/// <exception cref="AerospikeException">if register fails</exception>
- public RegisterTask Register(Policy policy, Assembly resourceAssembly, string resourcePath, string serverPath, Language language)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- string content;
- using (Stream stream = resourceAssembly.GetManifestResourceStream(resourcePath))
- {
- byte[] bytes = new byte[stream.Length];
- stream.Read(bytes, 0, bytes.Length);
- content = Convert.ToBase64String(bytes);
- }
- return RegisterCommand.Register(cluster, policy, content, serverPath, language);
- }
-
-		/// <summary>
-		/// Register UDF functions located in a code string with server.  Example:
-		/// <code>
-		/// String code = @"
-		/// local function reducer(val1,val2)
-		///     return val1 + val2
-		/// end
-		///
-		/// function sum_single_bin(stream,name)
-		///     local function mapper(rec)
-		///         return rec[name]
-		///     end
-		///     return stream : map(mapper) : reduce(reducer)
-		/// end
-		///";
-		///
-		/// client.RegisterUdfString(null, code, "mysum.lua", Language.LUA);
-		/// </code>
-		/// <para>
-		/// This asynchronous server call will return before command is complete.
-		/// The user can optionally wait for command completion by using the returned
-		/// RegisterTask instance.
-		/// </para>
-		/// </summary>
-		/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
-		/// <param name="code">code string containing user defined functions</param>
-		/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
-		/// <param name="language">language of user defined functions</param>
-		/// <exception cref="AerospikeException">if register fails</exception>
- public RegisterTask RegisterUdfString(Policy policy, string code, string serverPath, Language language)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- byte[] bytes = ByteUtil.StringToUtf8(code);
- string content = Convert.ToBase64String(bytes);
- return RegisterCommand.Register(cluster, policy, content, serverPath, language);
- }
-
-		/// <summary>
-		/// Remove user defined function from server nodes.
-		/// </summary>
-		/// <param name="policy">info configuration parameters, pass in null for defaults</param>
-		/// <param name="serverPath">location of UDF on server nodes.  Example: mylua.lua</param>
-		/// <exception cref="AerospikeException">if remove fails</exception>
- public void RemoveUdf(InfoPolicy policy, string serverPath)
- {
- if (policy == null)
- {
- policy = infoPolicyDefault;
- }
- // Send UDF command to one node. That node will distribute the UDF command to other nodes.
- string command = "udf-remove:filename=" + serverPath;
- Node node = cluster.GetRandomNode();
- string response = Info.Request(policy, node, command);
-
- if (response.Equals("ok", StringComparison.CurrentCultureIgnoreCase))
- {
- return;
- }
-
- if (response.StartsWith("error=file_not_found"))
- {
- // UDF has already been removed.
- return;
- }
- throw new AerospikeException("Remove UDF failed: " + response);
- }
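-
-		// Editor's illustrative sketch: unregister the package from the Register
-		// example above. As the method documents, a "file_not_found" response is
-		// treated as already removed, so the call is effectively idempotent.
-		private static void RemoveUdfExample(AerospikeClient client)
-		{
-			client.RemoveUdf(null, "example.lua");
-		}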
-
-		/// <summary>
-		/// Execute user defined function on server and return results.
-		/// The function operates on a single record.
-		/// The package name is used to locate the udf file location:
-		/// <code>
-		/// udf file = &lt;server udf dir&gt;/&lt;package name&gt;.lua
-		/// </code>
-		/// </summary>
-		/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
-		/// <param name="key">unique record identifier</param>
-		/// <param name="packageName">server package name where user defined function resides</param>
-		/// <param name="functionName">user defined function</param>
-		/// <param name="args">arguments passed in to user defined function</param>
-		/// <exception cref="AerospikeException">if transaction fails</exception>
- public object Execute(WritePolicy policy, Key key, string packageName, string functionName, params Value[] args)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
- ExecuteCommand command = new ExecuteCommand(cluster, policy, key, packageName, functionName, args);
- command.Execute();
-
- Record record = command.Record;
-
- if (record == null || record.bins == null)
- {
- return null;
- }
-
-			IDictionary<string, object> map = record.bins;
- object obj;
-
- if (map.TryGetValue("SUCCESS", out obj))
- {
- return obj;
- }
-
- if (map.TryGetValue("FAILURE", out obj))
- {
- throw new AerospikeException(obj.ToString());
- }
- throw new AerospikeException("Invalid UDF return value");
- }
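-
-		// Editor's illustrative sketch (package/function names hypothetical): invoke a
-		// UDF on one record. Per the code above, the value under the "SUCCESS" bin is
-		// returned directly, and a "FAILURE" bin is surfaced as an AerospikeException.
-		private static void ExecuteUdfExample(AerospikeClient client)
-		{
-			Key key = new Key("test", "demo", "user1");
-			object result = client.Execute(null, key, "example", "add_value", Value.Get(10));
-			Console.WriteLine("UDF returned: " + result);
-		}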
-
-		/// <summary>
-		/// Execute user defined function on server for each key and return results.
-		/// The package name is used to locate the udf file location:
-		/// <code>
-		/// udf file = &lt;server udf dir&gt;/&lt;package name&gt;.lua
-		/// </code>
-		/// <para>
-		/// Requires server version 6.0+
-		/// </para>
-		/// </summary>
-		/// <param name="batchPolicy">batch configuration parameters, pass in null for defaults</param>
-		/// <param name="udfPolicy">udf configuration parameters, pass in null for defaults</param>
-		/// <param name="keys">array of unique record identifiers</param>
-		/// <param name="packageName">server package name where user defined function resides</param>
-		/// <param name="functionName">user defined function</param>
-		/// <param name="functionArgs">arguments passed in to user defined function</param>
-		/// <exception cref="AerospikeException.BatchRecordArray">which contains results for keys that did complete</exception>
- public BatchResults Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, Key[] keys, string packageName, string functionName, params Value[] functionArgs)
- {
- if (keys.Length == 0)
- {
- return new BatchResults(new BatchRecord[0], true);
- }
-
- if (batchPolicy == null)
- {
- batchPolicy = batchParentPolicyWriteDefault;
- }
-
- if (udfPolicy == null)
- {
- udfPolicy = batchUDFPolicyDefault;
- }
-
- byte[] argBytes = Packer.Pack(functionArgs);
-
- BatchAttr attr = new BatchAttr();
- attr.SetUDF(udfPolicy);
-
- BatchRecord[] records = new BatchRecord[keys.Length];
-
- for (int i = 0; i < keys.Length; i++)
- {
- records[i] = new BatchRecord(keys[i], attr.hasWrite);
- }
-
- try
- {
- BatchStatus status = new BatchStatus(true);
-				List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
- BatchCommand[] commands = new BatchCommand[batchNodes.Count];
- int count = 0;
-
- foreach (BatchNode batchNode in batchNodes)
- {
- commands[count++] = new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status);
- }
-
- BatchExecutor.Execute(cluster, batchPolicy, commands, status);
- return new BatchResults(records, status.GetStatus());
- }
- catch (Exception e)
- {
- // Batch terminated on fatal error.
- throw new AerospikeException.BatchRecordArray(records, e);
- }
- }
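-
-		// Editor's illustrative sketch: run a UDF over a batch of keys and inspect
-		// per-key outcomes. BatchResults.records and BatchRecord.resultCode are the
-		// fields populated by the code above; "example"/"add_value" are hypothetical names.
-		private static void BatchUdfExample(AerospikeClient client)
-		{
-			Key[] keys = { new Key("test", "demo", 1), new Key("test", "demo", 2) };
-			BatchResults results = client.Execute(null, null, keys, "example", "add_value", Value.Get(10));
-
-			foreach (BatchRecord record in results.records)
-			{
-				if (record.resultCode != ResultCode.OK)
-				{
-					Console.WriteLine("Key " + record.key + " failed: " + record.resultCode);
-				}
-			}
-		}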
-
- //----------------------------------------------------------
- // Query/Execute
- //----------------------------------------------------------
-
-		/// <summary>
-		/// Apply user defined function on records that match the background query statement filter.
-		/// Records are not returned to the client.
-		/// This asynchronous server call will return before the command is complete.
-		/// The user can optionally wait for command completion by using the returned
-		/// ExecuteTask instance.
-		/// </summary>
-		/// <param name="policy">configuration parameters, pass in null for defaults</param>
-		/// <param name="statement">background query definition</param>
-		/// <param name="packageName">server package where user defined function resides</param>
-		/// <param name="functionName">function name</param>
-		/// <param name="functionArgs">to pass to function name, if any</param>
-		/// <exception cref="AerospikeException">if command fails</exception>
- public ExecuteTask Execute(WritePolicy policy, Statement statement, string packageName, string functionName, params Value[] functionArgs)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
-
- statement.PackageName = packageName;
- statement.FunctionName = functionName;
- statement.FunctionArgs = functionArgs;
-
- cluster.AddTran();
-
- ulong taskId = statement.PrepareTaskId();
- Node[] nodes = cluster.ValidateNodes();
- Executor executor = new Executor(nodes.Length);
-
- foreach (Node node in nodes)
- {
- ServerCommand command = new ServerCommand(cluster, node, policy, statement, taskId);
- executor.AddCommand(command);
- }
-
- executor.Execute(nodes.Length);
- return new ExecuteTask(cluster, policy, statement, taskId);
- }
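-
-		// Editor's illustrative sketch: apply a UDF to every record matched by a
-		// background query, then wait for the server-side task to finish. All
-		// namespace/set/package names here are hypothetical.
-		private static void BackgroundUdfExample(AerospikeClient client)
-		{
-			Statement stmt = new Statement();
-			stmt.Namespace = "test";
-			stmt.SetName = "demo";
-
-			ExecuteTask task = client.Execute(null, stmt, "example", "normalize_bin");
-			task.Wait();
-		}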
-
-		/// <summary>
-		/// Apply operations on records that match the background query statement filter.
-		/// Records are not returned to the client.
-		/// This asynchronous server call will return before the command is complete.
-		/// The user can optionally wait for command completion by using the returned
-		/// ExecuteTask instance.
-		/// </summary>
-		/// <param name="policy">write configuration parameters, pass in null for defaults</param>
-		/// <param name="statement">background query definition</param>
-		/// <param name="operations">list of operations to be performed on selected records</param>
-		/// <exception cref="AerospikeException">if command fails</exception>
- public ExecuteTask Execute(WritePolicy policy, Statement statement, params Operation[] operations)
- {
- if (policy == null)
- {
- policy = writePolicyDefault;
- }
-
- if (operations.Length > 0)
- {
- statement.Operations = operations;
- }
-
- cluster.AddTran();
-
- ulong taskId = statement.PrepareTaskId();
- Node[] nodes = cluster.ValidateNodes();
- Executor executor = new Executor(nodes.Length);
-
- foreach (Node node in nodes)
- {
- ServerCommand command = new ServerCommand(cluster, node, policy, statement, taskId);
- executor.AddCommand(command);
- }
- executor.Execute(nodes.Length);
- return new ExecuteTask(cluster, policy, statement, taskId);
- }
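-
-		// Editor's illustrative sketch: the same background-query mechanism, but
-		// applying regular write operations instead of a UDF. The Filter assumes a
-		// secondary index exists on the (hypothetical) "age" bin.
-		private static void BackgroundOperateExample(AerospikeClient client)
-		{
-			Statement stmt = new Statement();
-			stmt.Namespace = "test";
-			stmt.SetName = "demo";
-			stmt.Filter = Filter.Range("age", 18, 30);
-
-			ExecuteTask task = client.Execute(null, stmt, Operation.Put(new Bin("group", "adult")));
-			task.Wait();
-		}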
-
- //--------------------------------------------------------
- // Query functions
- //--------------------------------------------------------
-
-		/// <summary>
-		/// Execute query and call action for each record returned from server.
-		/// </summary>
-		/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
-		/// <param name="statement">query definition</param>
-		/// <param name="action">action methods to be called for each record</param>
-		/// <exception cref="AerospikeException">if query fails</exception>
-		public void Query(QueryPolicy policy, Statement statement, Action<Key, Record> action)
- {
- using (RecordSet rs = Query(policy, statement))
- {
- while (rs.Next())
- {
- action(rs.Key, rs.Record);
- }
- }
- }
-
-		/// <summary>
-		/// Execute query and return record iterator.  The query executor puts records on a queue in
-		/// separate threads.  The calling thread concurrently pops records off the queue through the
-		/// record iterator.
-		/// </summary>
-		/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
-		/// <param name="statement">query definition</param>
-		/// <exception cref="AerospikeException">if query fails</exception>
- public RecordSet Query(QueryPolicy policy, Statement statement)
- {
- if (policy == null)
- {
- policy = queryPolicyDefault;
- }
-
- Node[] nodes = cluster.ValidateNodes();
-
- if (cluster.hasPartitionQuery || statement.filter == null)
- {
- PartitionTracker tracker = new PartitionTracker(policy, statement, nodes);
- QueryPartitionExecutor executor = new QueryPartitionExecutor(cluster, policy, statement, nodes.Length, tracker);
- return executor.RecordSet;
- }
- else
- {
- QueryRecordExecutor executor = new QueryRecordExecutor(cluster, policy, statement, nodes);
- executor.Execute();
- return executor.RecordSet;
- }
- }
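-
-		// Editor's illustrative sketch: run a secondary-index query and drain the
-		// RecordSet iterator. The using block disposes the record queue when done;
-		// the "age" bin and its index are hypothetical.
-		private static void QueryExample(AerospikeClient client)
-		{
-			Statement stmt = new Statement();
-			stmt.Namespace = "test";
-			stmt.SetName = "demo";
-			stmt.Filter = Filter.Equal("age", 25);
-
-			using (RecordSet rs = client.Query(null, stmt))
-			{
-				while (rs.Next())
-				{
-					Console.WriteLine(rs.Key + " -> " + rs.Record);
-				}
-			}
-		}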
-
-		/// <summary>
-		/// Execute query on all server nodes and return records via the listener. This method will
-		/// block until the query is complete. Listener callbacks are made within the scope of this call.
-		/// <para>
-		/// If <see cref="QueryPolicy.maxConcurrentNodes"/> is not 1, the supplied listener must handle
-		/// shared data in a thread-safe manner, because the listener will be called by multiple query
-		/// threads (one thread per node) in parallel.
-		/// </para>
-		/// <para>
-		/// Requires server version 6.0+ if using a secondary index query.
-		/// </para>
-		/// </summary>
-		/// <param name="policy">query configuration parameters, pass in null for defaults</param>
-		/// <param name="statement">query definition</param>
-		/// <param name="listener">where to send results</param>
-		/// <exception cref="AerospikeException">if query fails</exception>
- public void Query(QueryPolicy policy, Statement statement, QueryListener listener)
- {
- if (policy == null)
- {
- policy = queryPolicyDefault;
- }
-
- Node[] nodes = cluster.ValidateNodes();
-
- if (cluster.hasPartitionQuery || statement.filter == null)
- {
- PartitionTracker tracker = new PartitionTracker(policy, statement, nodes);
- QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker);
- }
- else
- {
- throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported");
- }
- }
-
-		/// <summary>
-		/// Execute query for specified partitions and return records via the listener. This method will
-		/// block until the query is complete. Listener callbacks are made within the scope of this call.
-		/// <para>
-		/// If <see cref="QueryPolicy.maxConcurrentNodes"/> is not 1, the supplied listener must handle
-		/// shared data in a thread-safe manner, because the listener will be called by multiple query
-		/// threads (one thread per node) in parallel.
-		/// </para>
-		/// <para>
-		/// The completion status of all partitions is stored in the partitionFilter when the query terminates.
-		/// This partitionFilter can then be used to resume an incomplete query at a later time.
-		/// This is the preferred method for query terminate/resume functionality.
-		/// </para>
-		/// <para>
-		/// Requires server version 6.0+ if using a secondary index query.
-		/// </para>
-		/// </summary>
-		/// <param name="policy">query configuration parameters, pass in null for defaults</param>
-		/// <param name="statement">query definition</param>
-		/// <param name="partitionFilter">
-		/// data partition filter. Set to <see cref="PartitionFilter.All"/> for all partitions.
-		/// </param>
-		/// <param name="listener">where to send results</param>
-		/// <exception cref="AerospikeException">if query fails</exception>
- public void Query
- (
- QueryPolicy policy,
- Statement statement,
- PartitionFilter partitionFilter,
- QueryListener listener
- )
- {
- if (policy == null)
- {
- policy = queryPolicyDefault;
- }
-
- Node[] nodes = cluster.ValidateNodes();
-
- if (cluster.hasPartitionQuery || statement.filter == null)
- {
- PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter);
- QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker);
- }
- else
- {
- throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported");
- }
- }
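-
-		// Editor's illustrative sketch of the terminate/resume pattern described
-		// above: the same PartitionFilter instance records which partitions finished,
-		// so re-issuing the query after a failure resumes only the incomplete ones.
-		private static void QueryResumeExample(AerospikeClient client, Statement stmt)
-		{
-			PartitionFilter filter = PartitionFilter.All();
-
-			try
-			{
-				client.Query(null, stmt, filter, (key, record) => Console.WriteLine(key.ToString()));
-			}
-			catch (AerospikeException)
-			{
-				// Retry with the same filter; completed partitions are skipped.
-				client.Query(null, stmt, filter, (key, record) => Console.WriteLine(key.ToString()));
-			}
-		}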
-
-		/// <summary>
-		/// Execute query for specified partitions and return record iterator.  The query executor puts
-		/// records on a queue in separate threads.  The calling thread concurrently pops records off
-		/// the queue through the record iterator.
-		/// <para>
-		/// Requires server version 6.0+ if using a secondary index query.
-		/// </para>
-		/// </summary>
-		/// <param name="policy">query configuration parameters, pass in null for defaults</param>
-		/// <param name="statement">query definition</param>
-		/// <param name="partitionFilter">filter on a subset of data partitions</param>
-		/// <exception cref="AerospikeException">if query fails</exception>
- public RecordSet QueryPartitions
- (
- QueryPolicy policy,
- Statement statement,
- PartitionFilter partitionFilter
- )
- {
- if (policy == null)
- {
- policy = queryPolicyDefault;
- }
-
- Node[] nodes = cluster.ValidateNodes();
-
- if (cluster.hasPartitionQuery || statement.filter == null)
- {
- PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter);
- QueryPartitionExecutor executor = new QueryPartitionExecutor(cluster, policy, statement, nodes.Length, tracker);
- return executor.RecordSet;
- }
- else
- {
- throw new AerospikeException(ResultCode.PARAMETER_ERROR, "QueryPartitions() not supported");
- }
- }
-
-		/// <summary>
-		/// Execute query, apply statement's aggregation function, and return result iterator.
-		/// The aggregation function should be located in a Lua script file that can be found from the
-		/// "LuaConfig.PackagePath" paths static variable.  The default package path is "udf/?.lua"
-		/// where "?" is the packageName.
-		/// <para>
-		/// The query executor puts results on a queue in separate threads.  The calling thread
-		/// concurrently pops results off the queue through the ResultSet iterator.
-		/// The aggregation function is called on both server and client (final reduce).
-		/// Therefore, the Lua script file must also reside on both server and client.
-		/// </para>
-		/// </summary>
-		/// <param name="policy">query configuration parameters, pass in null for defaults</param>
-		/// <param name="statement">query definition</param>
-		/// <param name="packageName">server package where user defined function resides</param>
-		/// <param name="functionName">aggregation function name</param>
-		/// <param name="functionArgs">arguments to pass to function name, if any</param>
-		/// <exception cref="AerospikeException">if query fails</exception>
- public ResultSet QueryAggregate
- (
- QueryPolicy policy,
- Statement statement,
- string packageName,
- string functionName,
- params Value[] functionArgs
- )
- {
- statement.SetAggregateFunction(packageName, functionName, functionArgs);
- return QueryAggregate(policy, statement);
- }
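-
-		// Editor's illustrative sketch: run the "sum_single_bin" aggregation from the
-		// RegisterUdfString example above and read the reduced value from the
-		// ResultSet. Per the remarks, the Lua file must also be reachable on the
-		// client via LuaConfig.PackagePath; the "age" bin is hypothetical.
-		private static void QueryAggregateExample(AerospikeClient client)
-		{
-			Statement stmt = new Statement();
-			stmt.Namespace = "test";
-			stmt.SetName = "demo";
-
-			using (ResultSet rs = client.QueryAggregate(null, stmt, "mysum", "sum_single_bin", Value.Get("age")))
-			{
-				while (rs.Next())
-				{
-					Console.WriteLine("sum = " + rs.Object);
-				}
-			}
-		}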
-
-		/// <summary>
-		/// Execute query, apply statement's aggregation function, call action for each aggregation
-		/// object returned from server.
-		/// </summary>
-		/// <param name="policy">query configuration parameters, pass in null for defaults</param>
-		/// <param name="statement">
-		/// query definition with aggregate functions already initialized by SetAggregateFunction().
-		/// </param>
-		/// <param name="action">action methods to be called for each aggregation object</param>
-		/// <exception cref="AerospikeException">if query fails</exception>
- public void QueryAggregate(QueryPolicy policy, Statement statement, Action