From dff4734c38f917f828bc6b78e29a8327f3694665 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Thu, 1 Aug 2024 13:56:21 -0600 Subject: [PATCH 01/41] First pass at MRT --- AerospikeClient/Async/AsyncBatch.cs | 107 +- AerospikeClient/Async/AsyncClient.cs | 48 +- AerospikeClient/Async/IAsyncClient.cs | 37 +- AerospikeClient/Command/Batch.cs | 207 ++- AerospikeClient/Command/BatchAttr.cs | 27 + AerospikeClient/Command/ByteUtil.cs | 55 +- AerospikeClient/Command/Command.cs | 1342 +++++++++++++---- AerospikeClient/Command/FieldType.cs | 7 +- AerospikeClient/Command/OperateArgs.cs | 1 - AerospikeClient/Command/SyncWriteCommand.cs | 162 ++ AerospikeClient/Command/TranAddKeys.cs | 107 ++ AerospikeClient/Command/TranClose.cs | 47 + .../Command/TranMarkRollForward.cs | 47 + AerospikeClient/Command/TranMonitor.cs | 160 ++ AerospikeClient/Command/TranRoll.cs | 262 ++++ AerospikeClient/Command/WriteCommand.cs | 31 +- AerospikeClient/Listener/AbortListener.cs | 35 + AerospikeClient/Listener/CommitListener.cs | 35 + AerospikeClient/Main/AbortError.cs | 41 + AerospikeClient/Main/AerospikeClient.cs | 76 +- AerospikeClient/Main/AerospikeException.cs | 151 +- AerospikeClient/Main/CommitError.cs | 49 + AerospikeClient/Main/IAerospikeClient.cs | 35 + AerospikeClient/Main/ResultCode.cs | 38 +- AerospikeClient/Main/Tran.cs | 149 ++ AerospikeClient/Policy/BatchPolicy.cs | 2 +- AerospikeClient/Policy/ClientPolicy.cs | 17 +- AerospikeClient/Policy/Policy.cs | 9 + AerospikeClient/Policy/TranRollPolicy.cs | 45 + AerospikeClient/Policy/TranVerifyPolicy.cs | 46 + .../Proxy/AerospikeClientProxy.cs | 64 +- .../Proxy/AsyncClientProxy.cs | 40 +- AerospikeClientProxy/Proxy/BatchProxy.cs | 6 +- AerospikeClientProxy/Proxy/GRPCCommand.cs | 9 +- 34 files changed, 3080 insertions(+), 414 deletions(-) create mode 100644 AerospikeClient/Command/SyncWriteCommand.cs create mode 100644 AerospikeClient/Command/TranAddKeys.cs create mode 100644 AerospikeClient/Command/TranClose.cs create mode 100644 
AerospikeClient/Command/TranMarkRollForward.cs create mode 100644 AerospikeClient/Command/TranMonitor.cs create mode 100644 AerospikeClient/Command/TranRoll.cs create mode 100644 AerospikeClient/Listener/AbortListener.cs create mode 100644 AerospikeClient/Listener/CommitListener.cs create mode 100644 AerospikeClient/Main/AbortError.cs create mode 100644 AerospikeClient/Main/CommitError.cs create mode 100644 AerospikeClient/Main/Tran.cs create mode 100644 AerospikeClient/Policy/TranRollPolicy.cs create mode 100644 AerospikeClient/Policy/TranVerifyPolicy.cs diff --git a/AerospikeClient/Async/AsyncBatch.cs b/AerospikeClient/Async/AsyncBatch.cs index 8f3a7b75..af0561af 100644 --- a/AerospikeClient/Async/AsyncBatch.cs +++ b/AerospikeClient/Async/AsyncBatch.cs @@ -86,7 +86,7 @@ protected internal override void WriteBuffer() { if (batch.node.HasBatchAny) { - SetBatchOperate(batchPolicy, records, batch); + SetBatchOperate(batchPolicy, null, null, null, records, batch); } else { @@ -96,10 +96,10 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); - BatchRead record = records[batchIndex]; + ParseFieldsRead(record.key); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -197,7 +197,7 @@ protected internal override void WriteBuffer() { if (batch.node.HasBatchAny) { - SetBatchOperate(batchPolicy, records, batch); + SetBatchOperate(batchPolicy, null, null, null, records, batch); } else { @@ -207,10 +207,10 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); - BatchRead record = records[batchIndex]; + ParseFieldsRead(record.key); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -330,7 +330,7 @@ protected internal override void WriteBuffer() { if (batch.node.HasBatchAny) { - BatchAttr attr = new BatchAttr(batchPolicy, readAttr, ops); + BatchAttr attr = new(batchPolicy, readAttr, ops); SetBatchOperate(batchPolicy, keys, batch, 
binNames, ops, attr); } else @@ -341,7 +341,7 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); + ParseFieldsRead(keys[batchIndex]); if (resultCode == 0) { @@ -453,7 +453,7 @@ protected internal override void WriteBuffer() { if (batch.node.HasBatchAny) { - BatchAttr attr = new BatchAttr(batchPolicy, readAttr, ops); + BatchAttr attr = new(batchPolicy, readAttr, ops); SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); } else @@ -464,10 +464,10 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); - Key keyOrig = keys[batchIndex]; + ParseFieldsRead(keyOrig); + if (resultCode == 0) { Record record = ParseRecord(); @@ -570,7 +570,7 @@ protected internal override void WriteBuffer() { if (batch.node.HasBatchAny) { - BatchAttr attr = new BatchAttr(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); + BatchAttr attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); SetBatchOperate(batchPolicy, keys, batch, null, null, attr); } else @@ -581,13 +581,13 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); - if (opCount > 0) { throw new AerospikeException.Parse("Received bins that were not requested!"); } + ParseFieldsRead(keys[batchIndex]); + existsArray[batchIndex] = resultCode == 0; } @@ -679,7 +679,7 @@ protected internal override void WriteBuffer() { if (batch.node.HasBatchAny) { - BatchAttr attr = new BatchAttr(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); + BatchAttr attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); SetBatchOperate(batchPolicy, keys, batch, null, null, attr); } else @@ -690,14 +690,13 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); - if (opCount > 0) { throw new AerospikeException.Parse("Received bins that were not 
requested!"); } Key keyOrig = keys[batchIndex]; + ParseFieldsRead(keyOrig); listener.OnExists(keyOrig, resultCode == 0); } @@ -791,15 +790,15 @@ protected internal override bool IsWrite() protected internal override void WriteBuffer() { - SetBatchOperate(batchPolicy, records, batch); + SetBatchOperate(batchPolicy, null, null, null, records, batch); } protected internal override void ParseRow() { - SkipKey(fieldCount); - BatchRecord record = records[batchIndex]; + ParseFields(record.key, record.hasWrite); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -936,15 +935,15 @@ protected internal override bool IsWrite() protected internal override void WriteBuffer() { - SetBatchOperate(batchPolicy, records, batch); + SetBatchOperate(batchPolicy, null, null, null, records, batch); } protected internal override void ParseRow() { - SkipKey(fieldCount); - BatchRecord record = records[batchIndex]; + ParseFields(record.key, record.hasWrite); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -1105,10 +1104,10 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); - BatchRecord record = records[batchIndex]; + ParseFields(record.key, record.hasWrite); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -1191,7 +1190,7 @@ BatchAttr attr public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) { - BatchRecord record = new BatchRecord(key, null, ae.Result, inDoubt, hasWrite); + BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite); sent[index] = true; AsyncBatch.OnRecord(cluster, listener, record, index); } @@ -1256,9 +1255,10 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); - Key keyOrig = keys[batchIndex]; + + ParseFields(keyOrig, attr.hasWrite); + BatchRecord record; if (resultCode == 0) @@ -1281,7 +1281,7 @@ internal override void 
SetInDoubt(bool inDoubt) if (!sent[index]) { Key key = keys[index]; - BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); + BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); sent[index] = true; AsyncBatch.OnRecord(cluster, listener, record, index); } @@ -1410,10 +1410,10 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); - BatchRecord record = records[batchIndex]; + ParseFields(record.key, record.hasWrite); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -1513,7 +1513,7 @@ BatchAttr attr public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) { - BatchRecord record = new BatchRecord(key, null, ae.Result, inDoubt, hasWrite); + BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite); sent[index] = true; AsyncBatch.OnRecord(cluster, listener, record, index); } @@ -1586,9 +1586,10 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - SkipKey(fieldCount); - Key keyOrig = keys[batchIndex]; + + ParseFields(keyOrig, attr.hasWrite); + BatchRecord record; if (resultCode == 0) @@ -1626,7 +1627,7 @@ internal override void SetInDoubt(bool inDoubt) if (!sent[index]) { Key key = keys[index]; - BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); + BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); sent[index] = true; AsyncBatch.OnRecord(cluster, listener, record, index); } @@ -1685,7 +1686,7 @@ public void Retry(AsyncMultiCommand[] commands) max += commands.Length - 1; } - foreach (AsyncBatchCommand command in commands) + foreach (AsyncBatchCommand command in commands.Cast()) { command.ExecuteBatchRetry(); } @@ -1807,6 +1808,40 @@ protected override 
Latency.LatencyType GetLatencyType() return Latency.LatencyType.BATCH; } + protected void ParseFieldsRead(Key key) + { + if (policy.Tran != null) + { + long? version = ParseVersion(fieldCount); + policy.Tran.OnRead(key, version); + } + else + { + SkipKey(fieldCount); + } + } + + protected void ParseFields(Key key, bool hasWrite) + { + if (policy.Tran != null) + { + long? version = ParseVersion(fieldCount); + + if (hasWrite) + { + policy.Tran.OnWrite(key, version, resultCode); + } + else + { + policy.Tran.OnRead(key, version); + } + } + else + { + SkipKey(fieldCount); + } + } + protected internal override bool PrepareRetry(bool timeout) { if (!(policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK)) diff --git a/AerospikeClient/Async/AsyncClient.cs b/AerospikeClient/Async/AsyncClient.cs index cd196d49..af766393 100644 --- a/AerospikeClient/Async/AsyncClient.cs +++ b/AerospikeClient/Async/AsyncClient.cs @@ -128,6 +128,52 @@ public AsyncClient(AsyncClientPolicy policy, params Host[] hosts) base.cluster = this.cluster; } + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Asynchronously attempt to commit the given multi-record transaction. First, the expected + /// record versions are sent to the server nodes for verification.If all nodes return success, + /// the transaction is committed.Otherwise, the transaction is aborted. + ///

+ /// This method registers the command with an event loop and returns. + /// The event loop thread will process the command and send the results to the listener. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// where to send results + /// multi-record transaction + public void Commit(CommitListener listener, Tran tran) + { + tran.SetRollAttempted(); + + //AsyncTranRoll tm = new AsyncTranRoll( + //cluster, tranVerifyPolicyDefault, tranRollPolicyDefault, tran + //); + //tm.Commit(listener); + } + + /// + /// Asynchronously abort and rollback the given multi-record transaction. + ///

+ /// This method registers the command with an event loop and returns. + /// The event loop thread will process the command and send the results to the listener. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// where to send results + /// multi-record transaction + public void Abort(AbortListener listener, Tran tran) + { + tran.SetRollAttempted(); + + //AsyncTranRoll tm = new AsyncTranRoll(cluster, null, tranRollPolicyDefault, tran); + //tm.Abort(listener); + } + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- @@ -1115,7 +1161,7 @@ public Task Operate(WritePolicy policy, CancellationToken token, Key key /// if queue is full public void Operate(WritePolicy policy, RecordListener listener, Key key, params Operation[] ops) { - OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, key, ops); + OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, ops); AsyncOperate async = new AsyncOperate(cluster, listener, key, args); async.Execute(); } diff --git a/AerospikeClient/Async/IAsyncClient.cs b/AerospikeClient/Async/IAsyncClient.cs index 3b82b58d..70d130aa 100644 --- a/AerospikeClient/Async/IAsyncClient.cs +++ b/AerospikeClient/Async/IAsyncClient.cs @@ -14,9 +14,6 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; namespace Aerospike.Client { @@ -39,7 +36,39 @@ namespace Aerospike.Client /// /// public interface IAsyncClient : IAerospikeClient - { + { + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Asynchronously attempt to commit the given multi-record transaction. First, the expected + /// record versions are sent to the server nodes for verification.If all nodes return success, + /// the transaction is committed.Otherwise, the transaction is aborted. + ///

+ /// This method registers the command with an event loop and returns. + /// The event loop thread will process the command and send the results to the listener. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// where to send results + /// multi-record transaction + void Commit(CommitListener listener, Tran tran); + + /// + /// Asynchronously abort and rollback the given multi-record transaction. + ///

+ /// This method registers the command with an event loop and returns. + /// The event loop thread will process the command and send the results to the listener. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// + /// + void Abort(AbortListener listener, Tran tran); + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- diff --git a/AerospikeClient/Command/Batch.cs b/AerospikeClient/Command/Batch.cs index 4ecbb5c8..fee63f92 100644 --- a/AerospikeClient/Command/Batch.cs +++ b/AerospikeClient/Command/Batch.cs @@ -42,7 +42,7 @@ protected internal override void WriteBuffer() { if (batch.node != null && batch.node.HasBatchAny) { - SetBatchOperate(batchPolicy, records, batch); + SetBatchOperate(batchPolicy, null, null, null, records, batch); } else { @@ -52,10 +52,10 @@ protected internal override void WriteBuffer() protected internal override bool ParseRow() { - SkipKey(fieldCount); - BatchRead record = records[batchIndex]; + ParseFieldsRead(record.key); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -116,7 +116,7 @@ protected internal override void WriteBuffer() { if (batch.node != null && batch.node.HasBatchAny) { - BatchAttr attr = new BatchAttr(policy, readAttr, ops); + BatchAttr attr = new(policy, readAttr, ops); SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); } else @@ -127,7 +127,7 @@ protected internal override void WriteBuffer() protected internal override bool ParseRow() { - SkipKey(fieldCount); + ParseFieldsRead(keys[batchIndex]); if (resultCode == 0) { @@ -174,7 +174,7 @@ protected internal override void WriteBuffer() { if (batch.node != null && batch.node.HasBatchAny) { - BatchAttr attr = new BatchAttr(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA); + BatchAttr attr = new(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA); SetBatchOperate(batchPolicy, keys, batch, null, null, attr); } else @@ -185,7 +185,7 @@ protected internal override void WriteBuffer() protected internal override bool ParseRow() { - SkipKey(fieldCount); + ParseFieldsRead(keys[batchIndex]); if (opCount > 0) { @@ -236,15 +236,15 @@ protected internal override bool 
IsWrite() protected internal override void WriteBuffer() { - SetBatchOperate(batchPolicy, (IList)records, batch); + SetBatchOperate(batchPolicy, null, null, null, (IList)records, batch); } protected internal override bool ParseRow() { - SkipKey(fieldCount); - BatchRecord record = records[batchIndex]; + ParseFields(record); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -342,10 +342,10 @@ protected internal override void WriteBuffer() protected internal override bool ParseRow() { - SkipKey(fieldCount); - BatchRecord record = records[batchIndex]; + ParseFields(record); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -434,10 +434,10 @@ protected internal override void WriteBuffer() protected internal override bool ParseRow() { - SkipKey(fieldCount); - BatchRecord record = records[batchIndex]; + ParseFields(record); + if (resultCode == 0) { record.SetRecord(ParseRecord()); @@ -494,6 +494,151 @@ protected internal override List GenerateBatchNodes() } } + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public sealed class BatchTranVerify : BatchCommand + { + private readonly Tran tran; + private readonly Key[] keys; + private readonly long[] versions; + private readonly BatchRecord[] records; + + public BatchTranVerify( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Tran tran, + Key[] keys, + long[] versions, + BatchRecord[] records, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, false) + { + this.tran = tran; + this.keys = keys; + this.versions = versions; + this.records = records; + } + + protected internal override bool IsWrite() + { + return false; + } + + protected internal override void WriteBuffer() + { + SetBatchTranVerify(batchPolicy, tran, keys, versions, batch); + } + + protected internal override bool ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == 0) + 
{ + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, false); + status.SetRowError(); + } + return true; + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchTranVerify(cluster, batchNode, batchPolicy, tran, keys, versions, records, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, false, status); + } + } + + public sealed class BatchTranRoll : BatchCommand + { + private readonly Key[] keys; + private readonly BatchRecord[] records; + private readonly BatchAttr attr; + + public BatchTranRoll( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + BatchRecord[] records, + BatchAttr attr, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, false) + { + this.keys = keys; + this.records = records; + this.attr = attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchTranRoll(batchPolicy, keys, batch, attr); + } + + protected internal override bool ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == 0) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + status.SetRowError(); + } + return true; + } + + protected internal override void SetInDoubt(bool inDoubt) + { + if (!inDoubt || !attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + } + } + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchTranRoll(cluster, batchNode, batchPolicy, keys, records, attr, status); + } + + protected internal 
override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); + } + } + //------------------------------------------------------- // Batch Base Command //------------------------------------------------------- @@ -554,6 +699,40 @@ public void Run(object obj) } } + protected void ParseFieldsRead(Key key) + { + if (policy.Tran != null) + { + long? version = ParseVersion(fieldCount); + policy.Tran.OnRead(key, version); + } + else + { + SkipKey(fieldCount); + } + } + + protected void ParseFields(BatchRecord br) + { + if (policy.Tran != null) + { + long? version = ParseVersion(fieldCount); + + if (br.hasWrite) + { + policy.Tran.OnWrite(br.key, version, resultCode); + } + else + { + policy.Tran.OnRead(br.key, version); + } + } + else + { + SkipKey(fieldCount); + } + } + protected override Latency.LatencyType GetLatencyType() { return Latency.LatencyType.BATCH; diff --git a/AerospikeClient/Command/BatchAttr.cs b/AerospikeClient/Command/BatchAttr.cs index 65c13b13..42839e40 100644 --- a/AerospikeClient/Command/BatchAttr.cs +++ b/AerospikeClient/Command/BatchAttr.cs @@ -22,7 +22,9 @@ public sealed class BatchAttr public int readAttr; public int writeAttr; public int infoAttr; + public int tranAttr; public int expiration; + public int opSize; public short generation; public bool hasWrite; public bool sendKey; @@ -394,5 +396,30 @@ public void SetDelete(BatchDeletePolicy dp) infoAttr |= Command.INFO3_COMMIT_MASTER; } } + + public void SetOpSize(Operation[] ops) + { + int dataOffset = 0; + + foreach (Operation op in ops) + { + dataOffset += ByteUtil.EstimateSizeUtf8(op.binName) + Command.OPERATION_HEADER_SIZE; + dataOffset += op.value.EstimateSize(); + } + opSize = dataOffset; + } + + public void SetTran(int attr) + { + filterExp = null; + readAttr = 0; + writeAttr = Command.INFO2_WRITE | Command.INFO2_RESPOND_ALL_OPS | Command.INFO2_DURABLE_DELETE; + infoAttr = 0; + tranAttr = 
attr; + expiration = 0; + generation = 0; + hasWrite = true; + sendKey = false; + } } } diff --git a/AerospikeClient/Command/ByteUtil.cs b/AerospikeClient/Command/ByteUtil.cs index fbb2a2e5..c01faea0 100644 --- a/AerospikeClient/Command/ByteUtil.cs +++ b/AerospikeClient/Command/ByteUtil.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -424,15 +424,52 @@ public static long LittleBytesToLong(byte[] buf, int offset) ); } - //------------------------------------------------------- - // 32 bit number conversions. - //------------------------------------------------------- - - /// - /// Convert int to big endian 32 bits. - /// The bit pattern will be the same regardless of sign. + //------------------------------------------------------- + // Transaction version conversions. + //------------------------------------------------------- + + /// + /// Convert long to a 7 byte record version for MRT. + /// + public static void LongToVersionBytes(long v, byte[] buf, int offset) + { + buf[offset++] = (byte)(v >> 0); + buf[offset++] = (byte)(v >> 8); + buf[offset++] = (byte)(v >> 16); + buf[offset++] = (byte)(v >> 24); + buf[offset++] = (byte)(v >> 32); + buf[offset++] = (byte)(v >> 40); + buf[offset] = (byte)(v >> 48); + } + + /// + /// Convert 7 byte record version to a long for MRT. 
/// - public static int IntToBytes(uint v, byte[] buf, int offset) + /// + /// + /// + public static long VersionBytesToLong(byte[] buf, int offset) + { + return ( + ((long)(buf[offset] & 0xFF) << 0) | + ((long)(buf[offset + 1] & 0xFF) << 8) | + ((long)(buf[offset + 2] & 0xFF) << 16) | + ((long)(buf[offset + 3] & 0xFF) << 24) | + ((long)(buf[offset + 4] & 0xFF) << 32) | + ((long)(buf[offset + 5] & 0xFF) << 40) | + ((long)(buf[offset + 6] & 0xFF) << 48) + ); + } + + //------------------------------------------------------- + // 32 bit number conversions. + //------------------------------------------------------- + + /// + /// Convert int to big endian 32 bits. + /// The bit pattern will be the same regardless of sign. + /// + public static int IntToBytes(uint v, byte[] buf, int offset) { // Benchmarks show that custom conversion is faster than System.BitConverter.GetBytes(). // Assume little endian machine and reverse/convert in one pass. diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs index 0f0fe138..2b50cfe5 100644 --- a/AerospikeClient/Command/Command.cs +++ b/AerospikeClient/Command/Command.cs @@ -63,6 +63,10 @@ public abstract class Command // 1 0 allow replica // 1 1 allow unavailable + public const int INFO4_MRT_VERIFY_READ = (1 << 0); // Send MRT version to the server to be verified. + public const int INFO4_MRT_ROLL_FORWARD = (1 << 1); // Roll forward MRT. + public const int INFO4_MRT_ROLL_BACK = (1 << 2); // Roll back MRT. 
+ public const byte STATE_READ_AUTH_HEADER = 1; public const byte STATE_READ_HEADER = 2; public const byte STATE_READ_DETAIL = 3; @@ -73,12 +77,12 @@ public abstract class Command public const byte BATCH_MSG_INFO = 0x2; public const byte BATCH_MSG_GEN = 0x4; public const byte BATCH_MSG_TTL = 0x8; + public const byte BATCH_MSG_INFO4 = 0x10; public const int MSG_TOTAL_HEADER_SIZE = 30; public const int FIELD_HEADER_SIZE = 5; public const int OPERATION_HEADER_SIZE = 8; public const int MSG_REMAINING_HEADER_SIZE = 22; - public const int DIGEST_SIZE = 20; public const int COMPRESS_THRESHOLD = 128; public const ulong CL_MSG_VERSION = 2UL; public const ulong AS_MSG_TYPE = 3UL; @@ -90,6 +94,7 @@ public abstract class Command internal readonly int serverTimeout; internal int socketTimeout; internal int totalTimeout; + internal long? Version; public Command(int socketTimeout, int totalTimeout, int maxRetries) { @@ -108,6 +113,337 @@ public Command(int socketTimeout, int totalTimeout, int maxRetries) } } + //-------------------------------------------------- + // Multi-record Transactions + //-------------------------------------------------- + + public void SetTranAddKeys(WritePolicy policy, Key key, OperateArgs args) + { + Begin(); + int fieldCount = EstimateKeySize(key); + dataOffset += args.size; + WriteTranMonitor(key, args.readAttr, args.writeAttr, fieldCount, args.operations.Length); + + foreach (Operation operation in args.operations) + { + WriteOperation(operation); + } + End(policy.compress); + } + + public void SetTranVerify(Tran tran, Key key, long ver) + { + Begin(); + int fieldCount = EstimateKeySize(key); + + // Version field. 
+ dataOffset += 7; + fieldCount++; + + SizeBuffer(); + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE; + dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ; + dataBuffer[dataOffset++] = 0; + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); + + WriteKey(key); + WriteFieldVersion(ver); + End(); + } + + public void SetBatchTranVerify( + BatchPolicy policy, + Tran tran, + Key[] keys, + long[] versions, + BatchNode batch + ) + { + BatchOffsetsNative offsets = new(batch); + SetBatchTranVerify(policy, tran, keys, versions, offsets); + } + + public void SetBatchTranVerify( + BatchPolicy policy, + Tran tran, + Key[] keys, + long[] versions, + BatchOffsets offsets + ) + { + // Estimate buffer size. + Begin(); + + // Batch field + dataOffset += FIELD_HEADER_SIZE + 5; + + Key prev = null; + int max = offsets.Size(); + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[offset]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(key, prev, ver)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Write full header and namespace/set/bin names. 
+ dataOffset += 9; // header(4) + fieldCount(2) + opCount(2) = 9 + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + + if (ver != null) + { + dataOffset += 7 + FIELD_HEADER_SIZE; + } + prev = key; + } + } + + SizeBuffer(); + + WriteBatchHeader(policy, totalTimeout, 1); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataBuffer[dataOffset++] = GetBatchFlags(policy); + prev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[offset]; + + dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(key, prev, ver)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4); + dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE; + dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ; + + int fieldCount = 0; + + if (ver.HasValue) + { + fieldCount++; + } + + WriteBatchFields(key, fieldCount, 0); + + if (ver.HasValue) + { + WriteFieldVersion(ver.Value); + } + + prev = key; + } + } + + // Write real field size. 
+ ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(policy.compress); + } + + public void SetTranMarkRollForward(Tran tran, Key key) + { + Bin bin = new("fwd", true); + + Begin(); + int fieldCount = EstimateKeySize(key); + EstimateOperationSize(bin); + WriteTranMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1); + WriteOperation(bin, Operation.Type.WRITE); + End(); + } + + public void SetTranRoll(Key key, Tran tran, int tranAttr) + { + Begin(); + int fieldCount = EstimateKeySize(key); + + fieldCount += SizeTran(key, tran, false); + + SizeBuffer(); + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)(Command.INFO2_WRITE | Command.INFO2_DURABLE_DELETE); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)tranAttr; + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); + + WriteKey(key); + WriteTran(tran, false); + End(); + } + + public void SetBatchTranRoll( + BatchPolicy policy, + Key[] keys, + BatchNode batch, + BatchAttr attr + ) + { + BatchOffsetsNative offsets = new(batch); + SetBatchTranRoll(policy, keys, attr, offsets); + } + + public void SetBatchTranRoll( + BatchPolicy policy, + Key[] keys, + BatchAttr attr, + BatchOffsets offsets + ) + { + // Estimate buffer size. 
+ Begin(); + int fieldCount = 1; + int max = offsets.Size(); + Tran tran = policy.Tran; + long[] versions = new long[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + versions[i] = tran.GetReadVersion(key); + } + + // Batch field + dataOffset += FIELD_HEADER_SIZE + 5; + + Key prev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long ver = versions[i]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(key, prev, ver)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Write full header and namespace/set/bin names. + dataOffset += 13; // header(5) + ttl(4) + fieldCount(2) + opCount(2) = 13 + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTranBatch(tran, ver); + dataOffset += 2; // gen(2) = 2 + prev = key; + } + } + + SizeBuffer(); + + WriteBatchHeader(policy, totalTimeout, fieldCount); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + prev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long ver = versions[i]; + + dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(key, prev, ver)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. + WriteBatchWrite(key, tran, ver, attr, null, 0, 0); + prev = key; + } + } + + // Write real field size. 
+ ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(policy.compress); + } + + public void SetTranClose(Tran tran, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(key); + WriteTranMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE, + fieldCount, 0); + End(); + } + + private void WriteTranMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) + { + SizeBuffer(); + + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)writeAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset); + + WriteKey(key); + } + //-------------------------------------------------- // Writes //-------------------------------------------------- @@ -115,7 +451,7 @@ public Command(int socketTimeout, int totalTimeout, int maxRetries) public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key key, Bin[] bins) { Begin(); - int fieldCount = EstimateKeySize(policy, key); + int fieldCount = EstimateKeySize(policy, key, true); if (policy.filterExp != null) { @@ -131,12 +467,9 @@ public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key k bool compress = SizeBuffer(policy); WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.Length); - WriteKey(policy, key); + WriteKey(policy, key, true); - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); foreach (Bin bin in bins) { @@ -148,7 +481,7 @@ 
public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key k public virtual void SetDelete(WritePolicy policy, Key key) { Begin(); - int fieldCount = EstimateKeySize(policy, key); + int fieldCount = EstimateKeySize(policy, key, true); if (policy.filterExp != null) { @@ -157,19 +490,26 @@ public virtual void SetDelete(WritePolicy policy, Key key) } SizeBuffer(); WriteHeaderWrite(policy, Command.INFO2_WRITE | Command.INFO2_DELETE, fieldCount, 0); - WriteKey(policy, key); + WriteKey(policy, key, true); - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); + End(); + } + + public void SetDelete(Policy policy, Key key, BatchAttr attr) + { + Begin(); + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); + SizeBuffer(); + WriteKeyAttr(policy, key, attr, exp, fieldCount, 0); End(); } public virtual void SetTouch(WritePolicy policy, Key key) { Begin(); - int fieldCount = EstimateKeySize(policy, key); + int fieldCount = EstimateKeySize(policy, key, true); if (policy.filterExp != null) { @@ -179,12 +519,9 @@ public virtual void SetTouch(WritePolicy policy, Key key) EstimateOperationSize(); SizeBuffer(); WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 1); - WriteKey(policy, key); + WriteKey(policy, key, true); - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); WriteOperation(Operation.Type.TOUCH); End(); } @@ -196,7 +533,7 @@ public virtual void SetTouch(WritePolicy policy, Key key) public virtual void SetExists(Policy policy, Key key) { Begin(); - int fieldCount = EstimateKeySize(policy, key); + int fieldCount = EstimateKeySize(policy, key, false); if (policy.filterExp != null) { @@ -205,93 +542,174 @@ public virtual void SetExists(Policy policy, Key key) } SizeBuffer(); WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); - WriteKey(policy, 
key); + WriteKey(policy, key, false); - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); End(); } - public virtual void SetRead(Policy policy, Key key) + public virtual void SetRead(Policy policy, Key key, string[] binNames) { + int readAttr = Command.INFO1_READ; + int opCount = 0; + + if (binNames != null && binNames.Length > 0) + { + opCount = binNames.Length; + } + else + { + readAttr |= Command.INFO1_GET_ALL; + } + Begin(); - int fieldCount = EstimateKeySize(policy, key); + int fieldCount = EstimateKeySize(policy, key, false); if (policy.filterExp != null) { dataOffset += policy.filterExp.Size(); fieldCount++; } + + if (opCount != 0) + { + foreach (string binName in binNames) + { + EstimateOperationSize(binName); + } + } + SizeBuffer(); - WriteHeaderRead(policy, serverTimeout, Command.INFO1_READ | Command.INFO1_GET_ALL, 0, 0, fieldCount, 0); - WriteKey(policy, key); + WriteHeaderRead(policy, serverTimeout, readAttr, 0, 0, fieldCount, opCount); + WriteKey(policy, key, false); - if (policy.filterExp != null) + policy.filterExp?.Write(this); + + if (opCount != 0) { - policy.filterExp.Write(this); + foreach (string binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } } End(); } - public virtual void SetRead(Policy policy, Key key, string[] binNames) + public void SetRead(Policy policy, BatchRead br) { - if (binNames != null) + Begin(); + + BatchReadPolicy rp = br.policy; + BatchAttr attr = new(); + Expression exp; + int opCount; + + if (rp != null) + { + attr.SetRead(rp); + exp = rp.filterExp ?? 
policy.filterExp; + } + else { - Begin(); - int fieldCount = EstimateKeySize(policy, key); + attr.SetRead(policy); + exp = policy.filterExp; + } - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } + if (br.binNames != null) + { + opCount = br.binNames.Length; - foreach (string binName in binNames) + foreach (string binName in br.binNames) { EstimateOperationSize(binName); } - SizeBuffer(); - WriteHeaderRead(policy, serverTimeout, Command.INFO1_READ, 0, 0, fieldCount, binNames.Length); - WriteKey(policy, key); + } + else if (br.ops != null) + { + attr.AdjustRead(br.ops); + opCount = br.ops.Length; - if (policy.filterExp != null) + foreach (Operation op in br.ops) { - policy.filterExp.Write(this); + if (Operation.IsWrite(op.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read"); + } + EstimateOperationSize(op); } + } + else + { + attr.AdjustRead(br.readAllBins); + opCount = 0; + } - foreach (string binName in binNames) + int fieldCount = EstimateKeyAttrSize(policy, br.key, attr, exp); + + SizeBuffer(); + WriteKeyAttr(policy, br.key, attr, exp, fieldCount, opCount); + + if (br.binNames != null) + { + foreach (string binName in br.binNames) { WriteOperation(binName, Operation.Type.READ); } - End(); } - else + else if (br.ops != null) + { + foreach (Operation op in br.ops) + { + WriteOperation(op); + } + } + End(); + } + + public void SetRead(Policy policy, Key key, Operation[] ops) + { + Begin(); + + BatchAttr attr = new BatchAttr(); + attr.SetRead(policy); + attr.AdjustRead(ops); + + int fieldCount = EstimateKeyAttrSize(policy, key, attr, policy.filterExp); + + foreach (Operation op in ops) { - SetRead(policy, key); + if (Operation.IsWrite(op.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read"); + } + EstimateOperationSize(op); } + + SizeBuffer(); + WriteKeyAttr(policy, key, attr, policy.filterExp, fieldCount, 
ops.Length); + + foreach (Operation op in ops) + { + WriteOperation(op); + } + End(); } public virtual void SetReadHeader(Policy policy, Key key) { Begin(); - int fieldCount = EstimateKeySize(policy, key); + int fieldCount = EstimateKeySize(policy, key, false); if (policy.filterExp != null) { dataOffset += policy.filterExp.Size(); fieldCount++; } - EstimateOperationSize((string)null); SizeBuffer(); WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); - WriteKey(policy, key); + WriteKey(policy, key, false); - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); End(); } @@ -302,7 +720,7 @@ public virtual void SetReadHeader(Policy policy, Key key) public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args) { Begin(); - int fieldCount = EstimateKeySize(policy, key); + int fieldCount = EstimateKeySize(policy, key, args.hasWrite); if (policy.filterExp != null) { @@ -314,12 +732,9 @@ public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args) bool compress = SizeBuffer(policy); WriteHeaderReadWrite(policy, args, fieldCount); - WriteKey(policy, key); + WriteKey(policy, key, args.hasWrite); - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); foreach (Operation operation in args.operations) { @@ -328,6 +743,24 @@ public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args) End(compress); } + public void SetOperate(Policy policy, BatchAttr attr, Key key, Operation[] ops) + { + Begin(); + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); + + dataOffset += attr.opSize; + SizeBuffer(); + WriteKeyAttr(policy, key, attr, exp, fieldCount, ops.Length); + + foreach (Operation op in ops) + { + WriteOperation(op); + } + End(policy.compress); + } + + //-------------------------------------------------- // UDF 
//-------------------------------------------------- @@ -335,7 +768,7 @@ public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args) public virtual void SetUdf(WritePolicy policy, Key key, string packageName, string functionName, Value[] args) { Begin(); - int fieldCount = EstimateKeySize(policy, key); + int fieldCount = EstimateKeySize(policy, key, true); if (policy.filterExp != null) { @@ -348,7 +781,7 @@ public virtual void SetUdf(WritePolicy policy, Key key, string packageName, stri bool compress = SizeBuffer(policy); WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0); - WriteKey(policy, key); + WriteKey(policy, key, true); if (policy.filterExp != null) { @@ -360,6 +793,27 @@ public virtual void SetUdf(WritePolicy policy, Key key, string packageName, stri End(compress); } + public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, Value[] args) + { + byte[] argBytes = Packer.Pack(args); + SetUdf(policy, attr, key, packageName, functionName, argBytes); + } + + public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, byte[] argBytes) + { + Begin(); + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); + fieldCount += EstimateUdfSize(packageName, functionName, argBytes); + + SizeBuffer(); + WriteKeyAttr(policy, key, attr, exp, fieldCount, 0); + WriteField(packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(functionName, FieldType.UDF_FUNCTION); + WriteField(argBytes, FieldType.UDF_ARGLIST); + End(policy.compress); + } + //-------------------------------------------------- // Batch Read Only //-------------------------------------------------- @@ -437,16 +891,12 @@ public virtual void SetBatchRead(BatchPolicy policy, List records, Ba WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); - if (policy.filterExp != null) - { - 
policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); int fieldSizeOffset = dataOffset; WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; prev = null; @@ -582,24 +1032,19 @@ int readAttr WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); int fieldSizeOffset = dataOffset; WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; prev = null; for (int i = 0; i < max; i++) { int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); Key key = keys[index]; byte[] digest = key.digest; @@ -622,7 +1067,7 @@ int readAttr dataBuffer[dataOffset++] = (byte)readAttr; WriteBatchFields(key, 0, binNames.Length); - foreach (String binName in binNames) + foreach (string binName in binNames) { WriteOperation(binName, Operation.Type.READ); } @@ -651,14 +1096,44 @@ int readAttr // Batch Read/Write Operations //-------------------------------------------------- - public virtual void SetBatchOperate(BatchPolicy policy, IList records, BatchNode batch) + public virtual void SetBatchOperate( + BatchPolicy policy, + BatchWritePolicy writePolicy, + BatchUDFPolicy udfPolicy, + BatchDeletePolicy deletePolicy, + IList records, + BatchNode batch) { - // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; - BatchRecord prev 
= null; + BatchOffsetsNative offsets = new BatchOffsetsNative(batch); + SetBatchOperate(policy, writePolicy, udfPolicy, deletePolicy, records, offsets); + } + public void SetBatchOperate( + BatchPolicy policy, + BatchWritePolicy writePolicy, + BatchUDFPolicy udfPolicy, + BatchDeletePolicy deletePolicy, + IList records, + BatchOffsets offsets + ) + { Begin(); + int max = offsets.Size(); + Tran tran = policy.Tran; + long?[] versions = null; + + if (tran != null) + { + versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + BatchRecord record = (BatchRecord)records[offset]; + versions[i] = tran.GetReadVersion(record.key); + } + } + int fieldCount = 1; if (policy.filterExp != null) @@ -669,19 +1144,18 @@ public virtual void SetBatchOperate(BatchPolicy policy, IList records, BatchNode dataOffset += FIELD_HEADER_SIZE + 5; + BatchRecord prev = null; + for (int i = 0; i < max; i++) { - BatchRecord record = (BatchRecord)records[offsets[i]]; + int offset = offsets.Get(i); + BatchRecord record = (BatchRecord)records[offset]; Key key = record.key; + long? ver = versions?[i]; dataOffset += key.digest.Length + 4; - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. - if (!policy.sendKey && prev != null && prev.key.ns == key.ns && - prev.key.setName == key.setName && record.Equals(prev)) + if (CanRepeat(policy, key, record, prev, ver)) { // Can set repeat previous namespace/bin names to save space. dataOffset++; @@ -689,51 +1163,43 @@ public virtual void SetBatchOperate(BatchPolicy policy, IList records, BatchNode else { // Estimate full header, namespace and bin names. 
- dataOffset += 12; + dataOffset += 13; dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTranBatch(tran, ver); dataOffset += record.Size(policy); prev = record; } } - - bool compress = SizeBuffer(policy); + SizeBuffer(); WriteBatchHeader(policy, totalTimeout, fieldCount); - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); dataBuffer[dataOffset++] = GetBatchFlags(policy); - BatchAttr attr = new BatchAttr(); + BatchAttr attr = new(); prev = null; for (int i = 0; i < max; i++) { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; + int offset = offsets.Get(i); + BatchRecord record = (BatchRecord)records[offset]; + long? ver = versions?[i]; + + dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); - BatchRecord record = (BatchRecord)records[index]; Key key = record.key; byte[] digest = key.digest; Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); dataOffset += digest.Length; - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. 
- if (!policy.sendKey && prev != null && prev.key.ns == key.ns && - prev.key.setName == key.setName && record.Equals(prev)) + if (CanRepeat(policy, key, record, prev, ver)) { // Can set repeat previous namespace/bin names to save space. dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; @@ -744,94 +1210,84 @@ public virtual void SetBatchOperate(BatchPolicy policy, IList records, BatchNode switch (record.GetBatchType()) { case BatchRecord.Type.BATCH_READ: - { - BatchRead br = (BatchRead)record; - - if (br.policy != null) { - attr.SetRead(br.policy); - } - else - { - attr.SetRead(policy); - } + BatchRead br = (BatchRead)record; - if (br.binNames != null) - { - WriteBatchBinNames(key, br.binNames, attr, attr.filterExp); - } - else if (br.ops != null) - { - attr.AdjustRead(br.ops); - WriteBatchOperations(key, br.ops, attr, attr.filterExp); - } - else - { - attr.AdjustRead(br.readAllBins); - WriteBatchRead(key, attr, attr.filterExp, 0); + if (br.policy != null) + { + attr.SetRead(br.policy); + } + else + { + attr.SetRead(policy); + } + + if (br.binNames != null) + { + if (br.binNames.Length > 0) + { + WriteBatchBinNames(key, tran, ver, br.binNames, attr, attr.filterExp); + } + else + { + attr.AdjustRead(true); + WriteBatchRead(key, tran, ver, attr, attr.filterExp, 0); + } + } + else if (br.ops != null) + { + attr.AdjustRead(br.ops); + WriteBatchOperations(key, tran, ver, br.ops, attr, attr.filterExp); + } + else + { + attr.AdjustRead(br.readAllBins); + WriteBatchRead(key, tran, ver, attr, attr.filterExp, 0); + } + break; } - break; - } case BatchRecord.Type.BATCH_WRITE: - { - BatchWrite bw = (BatchWrite)record; - - if (bw.policy != null) { - attr.SetWrite(bw.policy); - } - else - { - attr.SetWrite(policy); + BatchWrite bw = (BatchWrite)record; + BatchWritePolicy bwp = (bw.policy != null) ? 
bw.policy : writePolicy; + + attr.SetWrite(bwp); + attr.AdjustWrite(bw.ops); + WriteBatchOperations(key, tran, ver, bw.ops, attr, attr.filterExp); + break; } - attr.AdjustWrite(bw.ops); - WriteBatchOperations(key, bw.ops, attr, attr.filterExp); - break; - } case BatchRecord.Type.BATCH_UDF: - { - BatchUDF bu = (BatchUDF)record; - - if (bu.policy != null) - { - attr.SetUDF(bu.policy); - } - else { - attr.SetUDF(policy); + BatchUDF bu = (BatchUDF)record; + BatchUDFPolicy bup = (bu.policy != null) ? bu.policy : udfPolicy; + + attr.SetUDF(bup); + WriteBatchWrite(key, tran, ver, attr, attr.filterExp, 3, 0); + WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(bu.functionName, FieldType.UDF_FUNCTION); + WriteField(bu.argBytes, FieldType.UDF_ARGLIST); + break; } - WriteBatchWrite(key, attr, attr.filterExp, 3, 0); - WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME); - WriteField(bu.functionName, FieldType.UDF_FUNCTION); - WriteField(bu.argBytes, FieldType.UDF_ARGLIST); - break; - } case BatchRecord.Type.BATCH_DELETE: - { - BatchDelete bd = (BatchDelete)record; - - if (bd.policy != null) { - attr.SetDelete(bd.policy); - } - else - { - attr.SetDelete(policy); + BatchDelete bd = (BatchDelete)record; + BatchDeletePolicy bdp = (bd.policy != null) ? bd.policy : deletePolicy; + + attr.SetDelete(bdp); + WriteBatchWrite(key, tran, ver, attr, attr.filterExp, 0, 0); + break; } - WriteBatchWrite(key, attr, attr.filterExp, 0, 0); - break; - } } prev = record; } } // Write real field size. 
- ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); + ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(policy.compress); } public virtual void SetBatchOperate @@ -843,15 +1299,41 @@ public virtual void SetBatchOperate Operation[] ops, BatchAttr attr ) + { + BatchOffsetsNative offsets = new BatchOffsetsNative(batch); + SetBatchOperate(policy, keys, binNames, ops, attr, offsets); + } + + public void SetBatchOperate( + BatchPolicy policy, + Key[] keys, + string[] binNames, + Operation[] ops, + BatchAttr attr, + BatchOffsets offsets + ) { // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; + int max = offsets.Size(); + Tran tran = policy.Tran; + long?[] versions = null; - // Estimate dataBuffer size. Begin(); - int fieldCount = 1; + + if (tran != null) + { + versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + versions[i] = tran.GetReadVersion(key); + } + } + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = 1; if (exp != null) { @@ -865,13 +1347,13 @@ BatchAttr attr for (int i = 0; i < max; i++) { - Key key = keys[offsets[i]]; + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; dataOffset += key.digest.Length + 4; - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.ns == key.ns && - prev.setName == key.setName) + if (CanRepeat(key, prev, attr, ver)) { // Can set repeat previous namespace/bin names to save space. dataOffset++; @@ -879,18 +1361,19 @@ BatchAttr attr else { // Write full header and namespace/set/bin names. 
- dataOffset += 12; // header(4) + ttl(4) + fielCount(2) + opCount(2) = 12 + dataOffset += 13; // header(5) + ttl(4) + fieldCount(2) + opCount(2) = 13 dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTranBatch(tran, ver); - if (attr.sendKey) + if (attr.sendKey) { dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; } - if (binNames != null) + if (binNames != null) { - foreach (string binName in binNames) + foreach (string binName in binNames) { EstimateOperationSize(binName); } @@ -918,37 +1401,32 @@ BatchAttr attr } } - bool compress = SizeBuffer(policy); + SizeBuffer(); WriteBatchHeader(policy, totalTimeout, fieldCount); - if (exp != null) - { - exp.Write(this); - } + exp?.Write(this); int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); dataBuffer[dataOffset++] = GetBatchFlags(policy); prev = null; for (int i = 0; i < max; i++) { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; + + dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); - Key key = keys[index]; byte[] digest = key.digest; Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); dataOffset += digest.Length; - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.ns == key.ns && - prev.setName == key.setName) + if (CanRepeat(key, prev, attr, ver)) { // Can set repeat previous namespace/bin names to save space. 
dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; @@ -958,31 +1436,30 @@ BatchAttr attr // Write full message. if (binNames != null) { - WriteBatchBinNames(key, binNames, attr, null); + WriteBatchBinNames(key, tran, ver, binNames, attr, null); } else if (ops != null) { - WriteBatchOperations(key, ops, attr, null); + WriteBatchOperations(key, tran, ver, ops, attr, null); } else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) { - WriteBatchWrite(key, attr, null, 0, 0); + WriteBatchWrite(key, tran, ver, attr, null, 0, 0); } else { - WriteBatchRead(key, attr, null, 0); + WriteBatchRead(key, tran, ver, attr, null, 0); } prev = key; } } // Write real field size. - ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); + ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(policy.compress); } - public virtual void SetBatchUDF - ( + public virtual void SetBatchUDF( BatchPolicy policy, Key[] keys, BatchNode batch, @@ -992,14 +1469,41 @@ public virtual void SetBatchUDF BatchAttr attr ) { - // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; + BatchOffsetsNative offsets = new BatchOffsetsNative(batch); + SetBatchUDF(policy, keys, packageName, functionName, argBytes, attr, offsets); + } - // Estimate dataBuffer size. + public virtual void SetBatchUDF + ( + BatchPolicy policy, + Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchAttr attr, + BatchOffsets offsets + ) + { + // Estimate buffer size. 
Begin(); - int fieldCount = 1; + int max = offsets.Size(); + Tran tran = policy.Tran; + long?[] versions = null; + + if (tran != null) + { + versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + versions[i] = tran.GetReadVersion(key); + } + } + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = 1; if (exp != null) { @@ -1013,13 +1517,13 @@ BatchAttr attr for (int i = 0; i < max; i++) { - Key key = keys[offsets[i]]; + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; dataOffset += key.digest.Length + 4; - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.ns == key.ns && - prev.setName == key.setName) + if (CanRepeat(key, prev, attr, ver)) { // Can set repeat previous namespace/bin names to save space. dataOffset++; @@ -1027,51 +1531,47 @@ BatchAttr attr else { // Write full header and namespace/set/bin names. 
- dataOffset += 12; // header(4) + ttl(4) + fielCount(2) + opCount(2) = 12 + dataOffset += 13; // header(5) + ttl(4) + fieldCount(2) + opCount(2) = 13 dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTranBatch(tran, ver); - if (attr.sendKey) + if (attr.sendKey) { dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; } - dataOffset += 2; // gen(2) = 6 + dataOffset += 2; // gen(2) = 2 EstimateUdfSize(packageName, functionName, argBytes); prev = key; } } - bool compress = SizeBuffer(policy); + SizeBuffer(); WriteBatchHeader(policy, totalTimeout, fieldCount); - if (exp != null) - { - exp.Write(this); - } + exp?.Write(this); int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); dataBuffer[dataOffset++] = GetBatchFlags(policy); prev = null; for (int i = 0; i < max; i++) { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; + + dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); - Key key = keys[index]; byte[] digest = key.digest; Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); dataOffset += digest.Length; - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.ns == key.ns && - prev.setName == key.setName) + if (CanRepeat(key, prev, attr, ver)) { // Can set repeat previous namespace/bin names to save space. dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; @@ -1079,7 +1579,7 @@ BatchAttr attr else { // Write full message. 
- WriteBatchWrite(key, attr, null, 3, 0); + WriteBatchWrite(key, tran, ver, attr, null, 3, 0); WriteField(packageName, FieldType.UDF_PACKAGE_NAME); WriteField(functionName, FieldType.UDF_FUNCTION); WriteField(argBytes, FieldType.UDF_ARGLIST); @@ -1088,8 +1588,36 @@ BatchAttr attr } // Write real field size. - ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); + ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(policy.compress); + } + + private static bool CanRepeat( + Policy policy, + Key key, + BatchRecord record, + BatchRecord prev, + long? ver + ) + { + // Avoid relatively expensive full equality checks for performance reasons. + // Use reference equality only in hope that common namespaces/bin names are set from + // fixed variables. It's fine if equality not determined correctly because it just + // results in more space used. The batch will still be correct. + return ver == null && !policy.sendKey && prev != null && prev.key.ns == key.ns && + prev.key.setName == key.setName && record.Equals(prev); + } + + private static bool CanRepeat(Key key, Key prev, BatchAttr attr, long? ver) + { + return ver == null && !attr.sendKey && prev != null && prev.ns == key.ns && + prev.setName == key.setName; + } + + private static bool CanRepeat(Key key, Key prev, long? ver) + { + return ver == null && prev != null && prev.ns == key.ns && + prev.setName == key.setName; } private static Expression GetBatchExpression(Policy policy, BatchAttr attr) @@ -1118,6 +1646,24 @@ private static byte GetBatchFlags(BatchPolicy policy) return flags; } + private void SizeTranBatch(Tran tran, long? 
ver) + { + if (tran != null) + { + dataOffset += 8 + FIELD_HEADER_SIZE; + + if (ver != null) + { + dataOffset += 7 + FIELD_HEADER_SIZE; + } + + if (tran.Deadline != 0) + { + dataOffset += 4 + FIELD_HEADER_SIZE; + } + } + } + private void WriteBatchHeader(Policy policy, int timeout, int fieldCount) { int readAttr = Command.INFO1_BATCH; @@ -1131,18 +1677,21 @@ private void WriteBatchHeader(Policy policy, int timeout, int fieldCount) dataOffset += 8; dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)0; - Array.Clear(dataBuffer, dataOffset, 12); - dataOffset += 12; - + for (int i = 0; i < 10; i++) + { + dataBuffer[dataOffset++] = 0; + } dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); } - private void WriteBatchBinNames(Key key, string[] binNames, BatchAttr attr, Expression filter) + private void WriteBatchBinNames(Key key, Tran tran, long? ver, string[] binNames, BatchAttr attr, Expression filter) { - WriteBatchRead(key, attr, filter, binNames.Length); + WriteBatchRead(key, tran, ver, attr, filter, binNames.Length); foreach (string binName in binNames) { @@ -1150,15 +1699,15 @@ private void WriteBatchBinNames(Key key, string[] binNames, BatchAttr attr, Expr } } - private void WriteBatchOperations(Key key, Operation[] ops, BatchAttr attr, Expression filter) + private void WriteBatchOperations(Key key, Tran tran, long? 
ver, Operation[] ops, BatchAttr attr, Expression filter) { if (attr.hasWrite) { - WriteBatchWrite(key, attr, filter, 0, ops.Length); + WriteBatchWrite(key, tran, ver, attr, filter, 0, ops.Length); } else { - WriteBatchRead(key, attr, filter, ops.Length); + WriteBatchRead(key, tran, ver, attr, filter, ops.Length); } foreach (Operation op in ops) @@ -1167,48 +1716,78 @@ private void WriteBatchOperations(Key key, Operation[] ops, BatchAttr attr, Expr } } - private void WriteBatchRead(Key key, BatchAttr attr, Expression filter, int opCount) + private void WriteBatchRead(Key key, Tran tran, long? ver, BatchAttr attr, Expression filter, int opCount) { dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL); dataBuffer[dataOffset++] = (byte)attr.readAttr; dataBuffer[dataOffset++] = (byte)attr.writeAttr; dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = (byte)attr.tranAttr; dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); - WriteBatchFields(key, filter, 0, opCount); + WriteBatchFields(key, tran, ver, attr,filter, 0, opCount); } - private void WriteBatchWrite(Key key, BatchAttr attr, Expression filter, int fieldCount, int opCount) + private void WriteBatchWrite(Key key, Tran tran, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) { dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL); dataBuffer[dataOffset++] = (byte)attr.readAttr; dataBuffer[dataOffset++] = (byte)attr.writeAttr; dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = (byte)attr.tranAttr; dataOffset += ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + WriteBatchFields(key, tran, ver, attr, filter, fieldCount, opCount); + } - if (attr.sendKey) + private void WriteBatchFields(Key key, Tran tran, long? 
ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) + { + if (tran != null) { fieldCount++; - WriteBatchFields(key, filter, fieldCount, opCount); - WriteField(key.userKey, FieldType.KEY); + + if (ver != null) + { + fieldCount++; + } + + if (attr.hasWrite && tran.Deadline != 0) + { + fieldCount++; + } } - else + + if (filter != null) { - WriteBatchFields(key, filter, fieldCount, opCount); + fieldCount++; } - } - private void WriteBatchFields(Key key, Expression filter, int fieldCount, int opCount) - { - if (filter != null) + if (attr.sendKey) { fieldCount++; - WriteBatchFields(key, fieldCount, opCount); - filter.Write(this); } - else + + WriteBatchFields(key, fieldCount, opCount); + + if (tran != null) + { + WriteFieldLE(tran.Id, FieldType.MRT_ID); + + if (ver.HasValue) + { + WriteFieldVersion(ver.Value); + } + + if (attr.hasWrite && tran.Deadline != 0) + { + WriteFieldLE(tran.Deadline, FieldType.MRT_DEADLINE); + } + } + + filter?.Write(this); + + if (attr.sendKey) { - WriteBatchFields(key, fieldCount, opCount); + WriteField(key.userKey, FieldType.KEY); } } @@ -1328,8 +1907,7 @@ NodePartitions nodePartitions foreach (PartitionStatus part in nodePartitions.partsFull) { - ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); - dataOffset += 2; + dataOffset += ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); } } @@ -1362,7 +1940,7 @@ NodePartitions nodePartitions WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); // Write taskId field - WriteField(taskId, FieldType.TRAN_ID); + WriteField(taskId, FieldType.QUERY_ID); if (binNames != null) { @@ -1622,7 +2200,7 @@ NodePartitions nodePartitions WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); // Write taskId field - WriteField(taskId, FieldType.TRAN_ID); + WriteField(taskId, FieldType.QUERY_ID); if (statement.filter != null) { @@ -1673,10 +2251,7 @@ NodePartitions nodePartitions WriteField(functionArgBuffer, FieldType.UDF_ARGLIST); } - if 
(policy.filterExp != null) - { - policy.filterExp.Write(this); - } + policy.filterExp?.Write(this); if (partsFullSize > 0) { @@ -1684,8 +2259,7 @@ NodePartitions nodePartitions foreach (PartitionStatus part in nodePartitions.partsFull) { - ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); - dataOffset += 2; + dataOffset += ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); } } @@ -1706,8 +2280,7 @@ NodePartitions nodePartitions foreach (PartitionStatus part in nodePartitions.partsPartial) { - ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset); - dataOffset += 8; + dataOffset += ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset); } } @@ -1737,7 +2310,33 @@ NodePartitions nodePartitions // Command Sizing //-------------------------------------------------- - private int EstimateKeySize(Policy policy, Key key) + private int EstimateKeyAttrSize(Policy policy, Key key, BatchAttr attr, Expression filterExp) + { + int fieldCount = EstimateKeySize(policy, key, attr.hasWrite); + + if (filterExp != null) + { + dataOffset += filterExp.Size(); + fieldCount++; + } + return fieldCount; + } + + private int EstimateKeySize(Policy policy, Key key, bool sendDeadline) + { + int fieldCount = EstimateKeySize(key); + + fieldCount += SizeTran(key, policy.Tran, sendDeadline); + + if (policy.sendKey) + { + dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; + fieldCount++; + } + return fieldCount; + } + + private int EstimateKeySize(Key key) { int fieldCount = 0; @@ -1756,11 +2355,6 @@ private int EstimateKeySize(Policy policy, Key key) dataOffset += key.digest.Length + FIELD_HEADER_SIZE; fieldCount++; - if (policy.sendKey) - { - dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; - fieldCount++; - } return fieldCount; } @@ -2078,7 +2672,50 @@ private void WriteHeaderReadHeader(Policy policy, int readAttr, int fieldCount, dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, 
dataOffset); } - private void WriteKey(Policy policy, Key key) + /// + /// Header write for batch single commands. + /// + private void WriteKeyAttr( + Policy policy, + Key key, + BatchAttr attr, + Expression filterExp, + int fieldCount, + int operationCount + ) + { + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = 0; // unused + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes((uint)attr.generation, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + + WriteKey(policy, key, attr.hasWrite); + + filterExp?.Write(this); + } + + private void WriteKey(Policy policy, Key key, bool sendDeadline) + { + WriteKey(key); + WriteTran(policy.Tran, sendDeadline); + + if (policy.sendKey) + { + WriteField(key.userKey, FieldType.KEY); + } + } + + private void WriteKey(Key key) { // Write key into dataBuffer. 
if (key.ns != null) @@ -2092,11 +2729,6 @@ private void WriteKey(Policy policy, Key key) } WriteField(key.digest, FieldType.DIGEST_RIPE); - - if (policy.sendKey) - { - WriteField(key.userKey, FieldType.KEY); - } } private int WriteReadOnlyOperations(Operation[] ops, int readAttr) @@ -2139,8 +2771,7 @@ private void WriteOperation(Bin bin, Operation.Type operationType) int nameLength = ByteUtil.StringToUtf8(bin.name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); int valueLength = bin.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); - ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); dataBuffer[dataOffset++] = (byte) bin.value.Type; dataBuffer[dataOffset++] = (byte) 0; @@ -2153,8 +2784,7 @@ private void WriteOperation(Operation operation) int nameLength = ByteUtil.StringToUtf8(operation.binName, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); int valueLength = operation.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); - ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); dataBuffer[dataOffset++] = Operation.GetProtocolType(operation.type); dataBuffer[dataOffset++] = (byte) operation.value.Type; dataBuffer[dataOffset++] = (byte) 0; @@ -2166,8 +2796,7 @@ private void WriteOperation(string name, Operation.Type operationType) { int nameLength = ByteUtil.StringToUtf8(name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); - ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset); dataBuffer[dataOffset++] = 
Operation.GetProtocolType(operationType); dataBuffer[dataOffset++] = (byte) 0; dataBuffer[dataOffset++] = (byte) 0; @@ -2177,14 +2806,64 @@ private void WriteOperation(string name, Operation.Type operationType) private void WriteOperation(Operation.Type operationType) { - ByteUtil.IntToBytes(4, dataBuffer, dataOffset); - dataOffset += 4; + dataOffset += ByteUtil.IntToBytes(4, dataBuffer, dataOffset); dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); dataBuffer[dataOffset++] = 0; dataBuffer[dataOffset++] = 0; dataBuffer[dataOffset++] = 0; } + private int SizeTran(Key key, Tran tran, bool sendDeadline) + { + int fieldCount = 0; + + if (tran != null) + { + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + + Version = tran.GetReadVersion(key); + + if (Version != null) + { + dataOffset += 7 + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (sendDeadline && tran.Deadline != 0) + { + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + } + } + return fieldCount; + } + + private void WriteTran(Tran tran, bool sendDeadline) + { + if (tran != null) + { + WriteFieldLE(tran.Id, FieldType.MRT_ID); + + if (Version.HasValue) + { + WriteFieldVersion(Version.Value); + } + + if (sendDeadline && tran.Deadline != 0) + { + WriteFieldLE(tran.Deadline, FieldType.MRT_DEADLINE); + } + } + } + + private void WriteFieldVersion(long ver) + { + WriteFieldHeader(7, FieldType.RECORD_VERSION); + ByteUtil.LongToVersionBytes(ver, dataBuffer, dataOffset); + dataOffset += 7; + } + private void WriteField(Value value, int type) { int offset = dataOffset + FIELD_HEADER_SIZE; @@ -2214,12 +2893,24 @@ private void WriteField(int val, int type) dataOffset += ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset); } + private void WriteFieldLE(int val, int type) + { + WriteFieldHeader(4, type); + dataOffset += ByteUtil.IntToLittleBytes((uint)val, dataBuffer, dataOffset); + } + private void WriteField(ulong val, int type) { WriteFieldHeader(8, type); dataOffset += 
ByteUtil.LongToBytes(val, dataBuffer, dataOffset); } + private void WriteFieldLE(long val, int type) + { + WriteFieldHeader(8, type); + dataOffset += ByteUtil.LongToLittleBytes((ulong)val, dataBuffer, dataOffset); + } + private void WriteFieldHeader(int size, int type) { dataOffset += ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset); @@ -2353,10 +3044,59 @@ internal virtual Key ParseKey(int fieldCount, out ulong bval) return new Key(ns, digest, setName, userKey); } + public long? ParseVersion(int fieldCount) + { + long? version = null; + + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.RECORD_VERSION && size == 7) + { + version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); + } + dataOffset += size; + } + return version; + } + public static bool BatchInDoubt(bool isWrite, int commandSentCounter) { return isWrite && commandSentCounter > 1; } + + public interface BatchOffsets + { + int Size(); + int Get(int i); + } + + private class BatchOffsetsNative : BatchOffsets + { + private int size; + private int[] offsets; + + public BatchOffsetsNative(BatchNode batch) + { + this.size = batch.offsetsSize; + this.offsets = batch.offsets; + } + + public int Size() + { + return size; + } + + public int Get(int i) + { + return offsets[i]; + } + } } } #pragma warning restore 0618 diff --git a/AerospikeClient/Command/FieldType.cs b/AerospikeClient/Command/FieldType.cs index 75cd5fa3..7b4b01d0 100644 --- a/AerospikeClient/Command/FieldType.cs +++ b/AerospikeClient/Command/FieldType.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. 
@@ -21,8 +21,11 @@ public sealed class FieldType public const int NAMESPACE = 0; public const int TABLE = 1; public const int KEY = 2; + public const int RECORD_VERSION = 3; public const int DIGEST_RIPE = 4; - public const int TRAN_ID = 7; // user supplied transaction id, which is simply passed back + public const int MRT_ID = 5; + public const int MRT_DEADLINE = 6; + public const int QUERY_ID = 7; public const int SOCKET_TIMEOUT = 9; public const int RECORDS_PER_SECOND = 10; public const int PID_ARRAY = 11; diff --git a/AerospikeClient/Command/OperateArgs.cs b/AerospikeClient/Command/OperateArgs.cs index c13edab7..21ce2e4e 100644 --- a/AerospikeClient/Command/OperateArgs.cs +++ b/AerospikeClient/Command/OperateArgs.cs @@ -30,7 +30,6 @@ public OperateArgs WritePolicy policy, WritePolicy writeDefault, WritePolicy readDefault, - Key key, Operation[] operations ) { diff --git a/AerospikeClient/Command/SyncWriteCommand.cs b/AerospikeClient/Command/SyncWriteCommand.cs new file mode 100644 index 00000000..5380f2e3 --- /dev/null +++ b/AerospikeClient/Command/SyncWriteCommand.cs @@ -0,0 +1,162 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public abstract class SyncWriteCommand : SyncCommand + { + protected readonly WritePolicy writePolicy; + protected readonly Key key; + private readonly Partition partition; + + public SyncWriteCommand(Cluster cluster, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy) + { + this.writePolicy = writePolicy; + this.key = key; + this.partition = Partition.Write(cluster, writePolicy, key); + cluster.AddTran(); + } + + protected internal override bool IsWrite() + { + return true; + } + + protected internal override Node GetNode() + { + return partition.GetNodeWrite(cluster); + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.WRITE; + } + + protected internal override bool PrepareRetry(bool timeout) + { + partition.PrepareRetryWrite(timeout); + return true; + } + + protected int ParseHeader(IConnection conn) + { + // Read header. + conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); + + long sz = ByteUtil.BytesToLong(dataBuffer, 0); + int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); + + if (receiveSize <= 0) + { + throw new AerospikeException("Invalid receive size: " + receiveSize); + } + + SizeBuffer(receiveSize); + conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); + conn.UpdateLastUsed(); + + ulong type = (ulong)(sz >> 48) & 0xff; + + if (type == Command.AS_MSG_TYPE) + { + dataOffset = 5; + } + else if (type == Command.MSG_TYPE_COMPRESSED) + { + int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); + byte[] ubuf = new byte[usize]; + + ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); + dataBuffer = ubuf; + dataOffset = 13; + } + else + { + throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); + } + + int resultCode = dataBuffer[dataOffset] & 0xFF; + dataOffset++; + int generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + int expiration = ByteUtil.BytesToInt(dataBuffer, 
dataOffset); + dataOffset += 8; + int fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + int opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + + if (policy.Tran == null) + { + SkipFields(fieldCount); + if (opCount > 0) + { + throw new AerospikeException("Unexpected write response opCount: " + opCount + ',' + resultCode); + } + return resultCode; + } + + long? version = null; + + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int fieldType = dataBuffer[dataOffset++]; + int size = len - 1; + + if (fieldType == FieldType.RECORD_VERSION) + { + if (size == 7) + { + version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); + } + else + { + throw new AerospikeException("Record version field has invalid size: " + size); + } + } + dataOffset += size; + } + + policy.Tran.OnWrite(key, version, resultCode); + + if (opCount > 0) + { + throw new AerospikeException("Unexpected write response opCount: " + opCount + ',' + resultCode); + } + return resultCode; + } + + private void SkipFields(int fieldCount) + { + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. + for (int i = 0; i < fieldCount; i++) + { + int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4 + fieldlen; + } + } + + protected internal abstract override void WriteBuffer(); + + protected internal abstract override void ParseResult(IConnection conn); + } +} diff --git a/AerospikeClient/Command/TranAddKeys.cs b/AerospikeClient/Command/TranAddKeys.cs new file mode 100644 index 00000000..39d71567 --- /dev/null +++ b/AerospikeClient/Command/TranAddKeys.cs @@ -0,0 +1,107 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class TranAddKeys : SyncWriteCommand + { + private readonly OperateArgs args; + + public TranAddKeys (Cluster cluster, Key key, OperateArgs args) + : base(cluster, args.writePolicy, key) + { + this.args = args; + } + + protected internal override void WriteBuffer() + { + SetTranAddKeys(args.writePolicy, key, args); + } + + protected internal override void ParseResult(IConnection conn) + { + // Read header. 
+ conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); + + long sz = ByteUtil.BytesToLong(dataBuffer, 0); + int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); + + if (receiveSize <= 0) + { + throw new AerospikeException("Invalid receive size: " + receiveSize); + } + + SizeBuffer(receiveSize); + conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); + conn.UpdateLastUsed(); + + ulong type = (ulong)(sz >> 48) & 0xff; + + if (type == Command.AS_MSG_TYPE) + { + dataOffset = 5; + } + else if (type == Command.MSG_TYPE_COMPRESSED) + { + int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); + byte[] ubuf = new byte[usize]; + + ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); + dataBuffer = ubuf; + dataOffset = 13; + } + else + { + throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); + } + + int resultCode = dataBuffer[dataOffset] & 0xFF; + dataOffset++; + int generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + int expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 8; + int fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + int opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int fieldType = dataBuffer[dataOffset++]; + int size = len - 1; + + if (fieldType == FieldType.MRT_DEADLINE) + { + int deadline = ByteUtil.LittleBytesToInt(dataBuffer, dataOffset); + policy.Tran.Deadline = deadline; + } + dataOffset += size; + } + + if (resultCode == ResultCode.OK) + { + return; + } + + throw new AerospikeException(resultCode); + } + } +} diff --git a/AerospikeClient/Command/TranClose.cs b/AerospikeClient/Command/TranClose.cs new file mode 100644 index 00000000..4bd1ff49 --- /dev/null +++ b/AerospikeClient/Command/TranClose.cs @@ -0,0 +1,47 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. 
+ * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class TranClose : SyncWriteCommand + { + private readonly Tran tran; + + public TranClose(Cluster cluster, Tran tran, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + this.tran = tran; + } + + protected internal override void WriteBuffer() + { + SetTranClose(tran, key); + } + + protected internal override void ParseResult(IConnection conn) + { + int resultCode = ParseHeader(conn); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + return; + } + + throw new AerospikeException(resultCode); + } + } +} diff --git a/AerospikeClient/Command/TranMarkRollForward.cs b/AerospikeClient/Command/TranMarkRollForward.cs new file mode 100644 index 00000000..dde97904 --- /dev/null +++ b/AerospikeClient/Command/TranMarkRollForward.cs @@ -0,0 +1,47 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class TranMarkRollForward : SyncWriteCommand + { + private readonly Tran tran; + + public TranMarkRollForward(Cluster cluster, Tran tran, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + this.tran = tran; + } + + protected internal override void WriteBuffer() + { + SetTranMarkRollForward(tran, key); + } + + protected internal override void ParseResult(IConnection conn) + { + int resultCode = ParseHeader(conn); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) + { + return; + } + + throw new AerospikeException(resultCode); + } + } +} diff --git a/AerospikeClient/Command/TranMonitor.cs b/AerospikeClient/Command/TranMonitor.cs new file mode 100644 index 00000000..7fa7584c --- /dev/null +++ b/AerospikeClient/Command/TranMonitor.cs @@ -0,0 +1,160 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class TranMonitor + { + private static readonly ListPolicy OrderedListPolicy = new(ListOrder.ORDERED, + ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL); + + private static readonly string BinNameId = "id"; + private static readonly string BinNameDigests = "keyds"; + + public static void AddKey(Cluster cluster, WritePolicy policy, Key cmdKey) + { + Tran tran = policy.Tran; + + if (tran.Writes.Contains(cmdKey)) + { + // Transaction monitor already contains this key. + return; + } + + Operation[] ops = GetTranOps(tran, cmdKey); + AddWriteKeys(cluster, policy, ops); + } + + public static void AddKeys(Cluster cluster, BatchPolicy policy, Key[] keys) + { + Operation[] ops = GetTranOps(policy.Tran, keys); + AddWriteKeys(cluster, policy, ops); + } + + public static void AddKeys(Cluster cluster, BatchPolicy policy, List records) + { + Operation[] ops = GetTranOps(policy.Tran, records); + + if (ops != null) + { + AddWriteKeys(cluster, policy, ops); + } + } + + public static Operation[] GetTranOps(Tran tran, Key cmdKey) + { + tran.Ns = cmdKey.ns; + + if (tran.Deadline == 0) + { + // No existing monitor record. 
+ return new Operation[] { + Operation.Put(new Bin(BinNameId, tran.Id)), + ListOperation.Append(OrderedListPolicy, BinNameDigests, Value.Get(cmdKey.digest)) + }; + } + else + { + return new Operation[] { + ListOperation.Append(OrderedListPolicy, BinNameDigests, Value.Get(cmdKey.digest)) + }; + } + } + + public static Operation[] GetTranOps(Tran tran, Key[] keys) + { + List list = new(keys.Length); + + foreach (Key key in keys) + { + tran.Ns = key.ns; + list.Add(Value.Get(key.digest)); + } + return GetTranOps(tran, list); + } + + public static Operation[] GetTranOps(Tran tran, List records) + { + List list = new(records.Count); + + foreach (BatchRecord br in records) { + tran.Ns = br.key.ns; + + if (br.hasWrite) + { + list.Add(Value.Get(br.key.digest)); + } + } + + if (list.Count == 0) + { + // Readonly batch does not need to add key digests. + return null; + } + return GetTranOps(tran, list); + } + + private static Operation[] GetTranOps(Tran tran, List list) + { + if (tran.Deadline == 0) + { + // No existing monitor record. + return new Operation[] { + Operation.Put(new Bin(BinNameId, tran.Id)), + ListOperation.AppendItems(OrderedListPolicy, BinNameDigests, list) + }; + } + else + { + return new Operation[] { + ListOperation.AppendItems(OrderedListPolicy, BinNameDigests, list) + }; + } + } + + private static void AddWriteKeys(Cluster cluster, Policy policy, Operation[] ops) + { + Key tranKey = GetTranMonitorKey(policy.Tran); + WritePolicy wp = CopyTimeoutPolicy(policy); + OperateArgs args = new(wp, null, null, ops); + TranAddKeys cmd = new(cluster, tranKey, args); + cmd.Execute(); + } + + public static Key GetTranMonitorKey(Tran tran) + { + return new Key(tran.Ns, " keySet = tran.Writes; + + if (keySet.Count != 0) + { + // Tell MRT monitor that a roll-forward will commence. 
+ try + { + MarkRollForward(writePolicy, tranKey); + } + catch (Exception t) + { + throw new AerospikeException.Commit(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, verifyRecords, rollRecords, t); + } + + // Roll-forward writes in batch. + try + { + Roll(rollPolicy, Command.INFO4_MRT_ROLL_FORWARD); + } + catch (Exception t) + { + throw new AerospikeException.Commit(CommitErrorType.ROLL_FORWARD_ABANDONED, verifyRecords, rollRecords, t); + } + } + + if (tran.Deadline != 0) + { + // Remove MRT monitor. + try + { + Close(writePolicy, tranKey); + } + catch (Exception t) + { + throw new AerospikeException.Commit(CommitErrorType.CLOSE_ABANDONED, verifyRecords, rollRecords, t); + } + } + } + + public void Abort(BatchPolicy rollPolicy) + { + HashSet keySet = tran.Writes; + + if (keySet.Count != 0) + { + try + { + Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); + } + catch (Exception t) + { + throw new AerospikeException.Abort(AbortErrorType.ROLL_BACK_ABANDONED, rollRecords, t); + } + } + + if (tran.Deadline != 0) + { + try + { + WritePolicy writePolicy = new(rollPolicy); + Key tranKey = TranMonitor.GetTranMonitorKey(tran); + Close(writePolicy, tranKey); + } + catch (Exception t) + { + throw new AerospikeException.Abort(AbortErrorType.CLOSE_ABANDONED, rollRecords, t); + } + } + } + private void Verify(BatchPolicy verifyPolicy) + { + // Validate record versions in a batch. 
+ HashSet> reads = tran.Reads.ToHashSet>(); + int max = reads.Count; + if (max == 0) + { + return; + } + + BatchRecord[] records = new BatchRecord[max]; + Key[] keys = new Key[max]; + long[] versions = new long[max]; + int count = 0; + + foreach (KeyValuePair entry in reads) + { + Key key = entry.Key; + keys[count] = key; + records[count] = new BatchRecord(key, false); + versions[count] = entry.Value; + count++; + } + this.verifyRecords = records; + + BatchStatus status = new(true); + List bns = BatchNode.GenerateList(cluster, verifyPolicy, keys, records, false, status); + BatchCommand[] commands = new BatchCommand[bns.Count]; + + count = 0; + + foreach (BatchNode bn in bns) + { + commands[count++] = new BatchTranVerify( + cluster, bn, verifyPolicy, tran, keys, versions, records, status); + } + + BatchExecutor.Execute(cluster, verifyPolicy, commands, status); + + if (!status.GetStatus()) + { + throw new AerospikeException("Failed to verify one or more record versions"); + } + } + + private void MarkRollForward(WritePolicy writePolicy, Key tranKey) + { + // Tell MRT monitor that a roll-forward will commence. + TranMarkRollForward cmd = new(cluster, tran, writePolicy, tranKey); + cmd.Execute(); + } + + private void Roll(BatchPolicy rollPolicy, int tranAttr) + { + HashSet keySet = tran.Writes; + + if (keySet.Count == 0) + { + return; + } + + Key[] keys = keySet.ToArray(); + BatchRecord[] records = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + records[i] = new BatchRecord(keys[i], true); + } + + this.rollRecords = records; + + // Copy tran roll policy because it needs to be modified. + BatchPolicy batchPolicy = new(rollPolicy); + + BatchAttr attr = new(); + attr.SetTran(tranAttr); + BatchStatus status = new(true); + + // generate() requires a null tran instance. 
+ List bns = BatchNode.GenerateList(cluster, batchPolicy, keys, records, true, status); + BatchCommand[] commands = new BatchCommand[bns.Count]; + + // Batch roll forward requires the tran instance. + batchPolicy.Tran = tran; + + int count = 0; + + foreach (BatchNode bn in bns) + { + commands[count++] = new BatchTranRoll( + cluster, bn, batchPolicy, keys, records, attr, status); + } + BatchExecutor.Execute(cluster, batchPolicy, commands, status); + + if (!status.GetStatus()) + { + string rollString = tranAttr == Command.INFO4_MRT_ROLL_FORWARD ? "commit" : "abort"; + throw new AerospikeException("Failed to " + rollString + " one or more records"); + } + } + + private void Close(WritePolicy writePolicy, Key tranKey) + { + // Delete MRT monitor on server. + TranClose cmd = new(cluster, tran, writePolicy, tranKey); + cmd.Execute(); + + // Reset MRT on client. + tran.Clear(); + } + } +} diff --git a/AerospikeClient/Command/WriteCommand.cs b/AerospikeClient/Command/WriteCommand.cs index 07984aff..05b9eb91 100644 --- a/AerospikeClient/Command/WriteCommand.cs +++ b/AerospikeClient/Command/WriteCommand.cs @@ -17,40 +17,19 @@ namespace Aerospike.Client { - public sealed class WriteCommand : SyncCommand + public sealed class WriteCommand : SyncWriteCommand { - private readonly WritePolicy writePolicy; - private readonly Key key; - private readonly Partition partition; private readonly Bin[] bins; private readonly Operation.Type operation; public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bins, Operation.Type operation) - : base(cluster, writePolicy) + : base(cluster, writePolicy, key) { - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.Write(cluster, writePolicy, key); this.bins = bins; this.operation = operation; cluster.AddTran(); } - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode() - { - return partition.GetNodeWrite(cluster); - } - - protected override 
Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - protected internal override void WriteBuffer() { SetWrite(writePolicy, operation, key, bins); @@ -80,11 +59,5 @@ protected internal override void ParseResult(IConnection conn) throw new AerospikeException(resultCode); } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } } } diff --git a/AerospikeClient/Listener/AbortListener.cs b/AerospikeClient/Listener/AbortListener.cs new file mode 100644 index 00000000..3a86682e --- /dev/null +++ b/AerospikeClient/Listener/AbortListener.cs @@ -0,0 +1,35 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +namespace Aerospike.Client +{ + /// + /// Asynchronous result notifications for multi-record transaction (MRT) aborts. + /// + public interface AbortListener + { + /// + /// This method is called when the abort succeeds. + /// + void OnSuccess(); + + /// + /// This method is called when the abort fails. 
+ /// + /// error that occurred + void OnFailure(AerospikeException exception); + } +} diff --git a/AerospikeClient/Listener/CommitListener.cs b/AerospikeClient/Listener/CommitListener.cs new file mode 100644 index 00000000..f77bc571 --- /dev/null +++ b/AerospikeClient/Listener/CommitListener.cs @@ -0,0 +1,35 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +namespace Aerospike.Client +{ + /// + /// Asynchronous result notifications for multi-record transaction (MRT) commits. + /// + public interface CommitListener + { + /// + /// This method is called when the records are verified and the commit succeeds. + /// + void OnSuccess(); + + /// + /// This method is called when the commit fails. + /// + /// error that occurred + void OnFailure(AerospikeException exception); + } +} diff --git a/AerospikeClient/Main/AbortError.cs b/AerospikeClient/Main/AbortError.cs new file mode 100644 index 00000000..83997c91 --- /dev/null +++ b/AerospikeClient/Main/AbortError.cs @@ -0,0 +1,41 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) abort error status. + /// + public static class AbortError + { + public enum AbortErrorType + { + ROLL_BACK_ABANDONED, + CLOSE_ABANDONED + } + + public static string AbortErrorToString(AbortErrorType error) + { + return error switch + { + AbortErrorType.ROLL_BACK_ABANDONED => "MRT client roll back abandoned. Server will eventually abort the MRT.", + AbortErrorType.CLOSE_ABANDONED => "MRT has been rolled back, but MRT client close was abandoned. Server will eventually close the MRT.", + _ => "Unexpected AbortErrorType" + }; + } + } +} diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index 0ceecc27..852ebb87 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -94,6 +94,17 @@ public class AerospikeClient : IDisposable, IAerospikeClient /// public BatchUDFPolicy batchUDFPolicyDefault; + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// + public BatchPolicy tranVerifyPolicyDefault; + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// + public BatchPolicy tranRollPolicyDefault; + /// /// Default info policy that is used when info command policy is null. 
/// @@ -190,6 +201,8 @@ public AerospikeClient(ClientPolicy policy, params Host[] hosts) this.batchWritePolicyDefault = policy.batchWritePolicyDefault; this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; + this.tranVerifyPolicyDefault = policy.tranVerifyPolicyDefault; + this.tranRollPolicyDefault = policy.tranRollPolicyDefault; this.infoPolicyDefault = policy.infoPolicyDefault; this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); @@ -214,6 +227,8 @@ protected internal AerospikeClient(ClientPolicy policy) this.batchWritePolicyDefault = policy.batchWritePolicyDefault; this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; + this.tranVerifyPolicyDefault = policy.tranVerifyPolicyDefault; + this.tranRollPolicyDefault = policy.tranRollPolicyDefault; this.infoPolicyDefault = policy.infoPolicyDefault; } else @@ -227,6 +242,8 @@ protected internal AerospikeClient(ClientPolicy policy) this.batchWritePolicyDefault = new BatchWritePolicy(); this.batchDeletePolicyDefault = new BatchDeletePolicy(); this.batchUDFPolicyDefault = new BatchUDFPolicy(); + this.tranVerifyPolicyDefault = new TranVerifyPolicy(); + this.tranRollPolicyDefault= new TranRollPolicy(); this.infoPolicyDefault = new InfoPolicy(); } this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); @@ -320,6 +337,25 @@ public BatchUDFPolicy BatchUDFPolicyDefault set { batchUDFPolicyDefault = value; } } + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// + public BatchPolicy TranVerifyPolicyDefault + { + get { return tranVerifyPolicyDefault; } + set { tranVerifyPolicyDefault = value; } + } + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. 
+ /// + public BatchPolicy TranRollPolicyDefault + { + get { return tranRollPolicyDefault; } + set { tranRollPolicyDefault = value; } + } + /// /// Default info policy that is used when info command policy is null. /// @@ -422,6 +458,42 @@ public ClusterStats GetClusterStats() return cluster.GetStats(); } + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Attempt to commit the given multi-record transaction. First, the expected record versions are + /// sent to the server nodes for verification.If all nodes return success, the transaction is + /// committed.Otherwise, the transaction is aborted. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// multi-record transaction + public void Commit(Tran tran) + { + tran.SetRollAttempted(); + + TranRoll tm = new TranRoll(cluster, tran); + tm.Commit(tranVerifyPolicyDefault, tranRollPolicyDefault); + } + + /// + /// Abort and rollback the given multi-record transaction. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// multi-record transaction + public void Abort(Tran tran) + { + tran.SetRollAttempted(); + + TranRoll tm = new TranRoll(cluster, tran); + tm.Abort(tranRollPolicyDefault); + } + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- @@ -1137,7 +1209,7 @@ public Record Join(BatchPolicy policy, Key key, params Join[] joins) /// if command fails public Record Operate(WritePolicy policy, Key key, params Operation[] operations) { - OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, key, operations); + OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations); OperateCommand command = new OperateCommand(cluster, key, args); command.Execute(); return command.Record; @@ -2027,7 +2099,7 @@ params CTX[] ctx sb.Append(";indexdata="); sb.Append(binName); - sb.Append(","); + sb.Append(','); sb.Append(indexType); // Send index command to one node. That node will distribute the command to other nodes. diff --git a/AerospikeClient/Main/AerospikeException.cs b/AerospikeClient/Main/AerospikeException.cs index 257a35fc..ec7268b0 100644 --- a/AerospikeClient/Main/AerospikeException.cs +++ b/AerospikeClient/Main/AerospikeException.cs @@ -15,6 +15,8 @@ * the License. 
*/ using System.Text; +using static Aerospike.Client.AbortError; +using static Aerospike.Client.CommitError; namespace Aerospike.Client { @@ -41,6 +43,12 @@ public AerospikeException(int resultCode, Exception e) this.resultCode = resultCode; } + public AerospikeException(int resultCode, string message) + : base(message) + { + this.resultCode = resultCode; + } + public AerospikeException(int resultCode) : base("") { @@ -76,7 +84,7 @@ public override string Message { get { - StringBuilder sb = new StringBuilder(512); + StringBuilder sb = new(512); sb.Append("Error "); sb.Append(resultCode); @@ -299,7 +307,7 @@ public override string Message return "Client timeout: " + totalTimeout; } - StringBuilder sb = new StringBuilder(512); + StringBuilder sb = new(512); if (client) { @@ -487,6 +495,12 @@ public BatchRecordArray(BatchRecord[] records, Exception e) { this.records = records; } + + public BatchRecordArray(BatchRecord[] records, String message, Exception e) + : base(ResultCode.BATCH_FAILED, message, e) + { + this.records = records; + } } /// @@ -588,5 +602,138 @@ public EndOfGRPCStream(int resultCode) ResultCode = resultCode; } } + + /// + /// Exception thrown when {@link AerospikeClient#commit(com.aerospike.client.Tran)} fails. + /// + public sealed class Commit : AerospikeException + { + /// + /// Error status of the attempted commit. + /// + public readonly CommitErrorType Error; + + /// + /// Verify result for each read key in the MRT. May be null if failure occurred before verify. + /// + public readonly BatchRecord[] VerifyRecords; + + /// + /// Roll forward/backward result for each write key in the MRT. May be null if failure occurred before + /// roll forward/backward. 
+ /// + public readonly BatchRecord[] RollRecords; + + public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords) + : base(ResultCode.TRAN_FAILED, CommitErrorToString(error)) + { + this.Error = error; + this.VerifyRecords = verifyRecords; + this.RollRecords = rollRecords; + } + + public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords, Exception cause) + : base(ResultCode.TRAN_FAILED, CommitErrorToString(error), cause) + { + this.Error = error; + this.VerifyRecords = verifyRecords; + this.RollRecords = rollRecords; + } + + /// + /// Get Commit message with records. + /// + public override string Message + { + get + { + StringBuilder sb = new(1024); + RecordsToString(sb, "verify errors:", VerifyRecords); + RecordsToString(sb, "roll errors:", RollRecords); + return BaseMessage + sb.ToString(); + } + } + } + + /// + /// Exception thrown when {@link AerospikeClient#abort(com.aerospike.client.Tran)} fails. + /// + public sealed class Abort : AerospikeException + { + /// + /// Error status of the attempted abort. + /// + public readonly AbortErrorType Error; + + /// + /// Roll backward result for each write key in the MRT. May be null if failure occurred before roll backward. + /// + public readonly BatchRecord[] RollRecords; + + public Abort(AbortErrorType error, BatchRecord[] rollRecords) + : base(ResultCode.TRAN_FAILED, AbortErrorToString(error)) + { + this.Error = error; + this.RollRecords = rollRecords; + } + + public Abort(AbortErrorType error, BatchRecord[] rollRecords, Exception cause) + : base(ResultCode.TRAN_FAILED, AbortErrorToString(error), cause) + { + this.Error = error; + this.RollRecords = rollRecords; + } + + /// + /// Get Abort message with records.
+ /// + public override string Message + { + get + { + StringBuilder sb = new(1024); + RecordsToString(sb, "roll errors:", RollRecords); + return BaseMessage + sb.ToString(); + } + } + } + + private static void RecordsToString(StringBuilder sb, String title, BatchRecord[] records) + { + if (records == null) + { + return; + } + + int count = 0; + + foreach (BatchRecord br in records) { + // Only show results with an error response. + if (!(br.resultCode == ResultCode.OK || br.resultCode == ResultCode.NO_RESPONSE)) + { + // Only show first 3 errors. + if (count >= 3) + { + sb.Append(System.Environment.NewLine); + sb.Append("..."); + break; + } + + if (count == 0) + { + sb.Append(System.Environment.NewLine); + sb.Append(title); + } + + sb.Append(System.Environment.NewLine); + sb.Append(br.key); + sb.Append(','); + sb.Append(br.resultCode); + sb.Append(','); + sb.Append(br.inDoubt); + count++; + } + } + } } } diff --git a/AerospikeClient/Main/CommitError.cs b/AerospikeClient/Main/CommitError.cs new file mode 100644 index 00000000..6a166eb5 --- /dev/null +++ b/AerospikeClient/Main/CommitError.cs @@ -0,0 +1,49 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) error status. 
+ /// + public static class CommitError + { + public enum CommitErrorType + { + VERIFY_FAIL, + VERIFY_FAIL_CLOSE_ABANDONED, + VERIFY_FAIL_ABORT_ABANDONED, + MARK_ROLL_FORWARD_ABANDONED, + ROLL_FORWARD_ABANDONED, + CLOSE_ABANDONED + } + + public static string CommitErrorToString(CommitErrorType type) + { + return type switch + { + CommitErrorType.VERIFY_FAIL => "MRT verify failed. MRT aborted.", + CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED => "MRT verify failed. MRT aborted. MRT client close abandoned. Server will eventually close the MRT.", + CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED => "MRT verify failed. MRT client abort abandoned. Server will eventually abort the MRT.", + CommitErrorType.MARK_ROLL_FORWARD_ABANDONED => "MRT client mark roll forward abandoned. Server will eventually abort the MRT.", + CommitErrorType.ROLL_FORWARD_ABANDONED => "MRT client roll forward abandoned. Server will eventually commit the MRT.", + CommitErrorType.CLOSE_ABANDONED => "MRT has been rolled forward, but MRT client close was abandoned. Server will eventually close the MRT.", + _ => "Unexpected CommitErrorType" + }; + } + } +} diff --git a/AerospikeClient/Main/IAerospikeClient.cs b/AerospikeClient/Main/IAerospikeClient.cs index f8180f88..8e39e632 100644 --- a/AerospikeClient/Main/IAerospikeClient.cs +++ b/AerospikeClient/Main/IAerospikeClient.cs @@ -73,6 +73,17 @@ public interface IAerospikeClient /// BatchUDFPolicy BatchUDFPolicyDefault { get; set; } + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// + BatchPolicy TranVerifyPolicyDefault { get; set; } + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// + BatchPolicy TranRollPolicyDefault { get; set; } + /// /// Default info policy that is used when info command policy is null. 
/// @@ -118,6 +129,30 @@ public interface IAerospikeClient /// ClusterStats GetClusterStats(); + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Attempt to commit the given multi-record transaction. First, the expected record versions are + /// sent to the server nodes for verification.If all nodes return success, the transaction is + /// committed.Otherwise, the transaction is aborted. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// multi-record transaction + void Commit(Tran tran); + + /// + /// Abort and rollback the given multi-record transaction. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// multi-record transaction + void Abort(Tran tran); + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- diff --git a/AerospikeClient/Main/ResultCode.cs b/AerospikeClient/Main/ResultCode.cs index e1093899..83a0416b 100644 --- a/AerospikeClient/Main/ResultCode.cs +++ b/AerospikeClient/Main/ResultCode.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -21,6 +21,12 @@ namespace Aerospike.Client /// public sealed class ResultCode { + /// + /// Multi-record transaction failed + /// Value: -17 + /// + public const int TRAN_FAILED = -17; + /// /// One or more keys failed in a batch. /// Value: -16 @@ -276,6 +282,24 @@ public sealed class ResultCode /// public const int LOST_CONFLICT = 28; + /// + /// MRT record blocked by a different transaction. + /// Value: 29 + /// + public const int MRT_BLOCKED = 29; + + /// + /// MRT read verify failed. Some other command changed record outside of the transaction. + /// Value: 30 + /// + public const int MRT_CONFLICT = 30; + + /// + /// MRT deadline reached without a successful commit or abort. + /// Value: 31 + /// + public const int MRT_EXPIRED = 31; + /// /// There are no more records left for query. 
/// Value: 50 @@ -542,6 +566,9 @@ public static string GetResultString(int resultCode) { switch (resultCode) { + case TRAN_FAILED: + return "Multi-record transaction failed"; + case BATCH_FAILED: return "One or more keys failed in a batch"; @@ -668,6 +695,15 @@ public static string GetResultString(int resultCode) case LOST_CONFLICT: return "Transaction failed due to conflict with XDR"; + case MRT_BLOCKED: + return "MRT record blocked by a different transaction"; + + case MRT_CONFLICT: + return "MRT verify failed"; + + case MRT_EXPIRED: + return "MRT expired"; + case QUERY_END: return "Query end"; diff --git a/AerospikeClient/Main/Tran.cs b/AerospikeClient/Main/Tran.cs new file mode 100644 index 00000000..009cae98 --- /dev/null +++ b/AerospikeClient/Main/Tran.cs @@ -0,0 +1,149 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +using System.Collections.Concurrent; +using System.Collections.Generic; + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT). Each command in the MRT must use the same namespace.
+ /// + public class Tran + { + public long Id { get; private set; } + public ConcurrentDictionary Reads { get; private set; } + public HashSet Writes { get; private set; } + public string Ns { get; set; } + public int Deadline { get; set; } + + private bool rollAttempted; + + /// + /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with default capacities. + /// + public Tran() + { + Id = CreateId(); + Reads = new ConcurrentDictionary(); + Writes = new HashSet(); + } + + /// + /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with given capacities. + /// + /// expected number of record reads in the MRT. Minimum value is 16. + /// expected number of record writes in the MRT. Minimum value is 16. + public Tran(int readsCapacity, int writesCapacity) + { + if (readsCapacity < 16) + { + readsCapacity = 16; + } + + if (writesCapacity < 16) + { + writesCapacity = 16; + } + + Id = CreateId(); + Reads = new ConcurrentDictionary(-1, readsCapacity); // TODO: concurrency level + Writes = new HashSet(writesCapacity); + } + + private static long CreateId() + { + // An id of zero is considered invalid. Create random numbers + // in a loop until non-zero is returned. + Random r = new(); + long id = r.NextInt64(); + + while (id == 0) + { + id = r.NextInt64(); + } + return id; + } + + /// + /// Process the results of a record read. For internal use only. + /// + /// + /// + internal void OnRead(Key key, long? version) + { + Ns = key.ns; + + if (version.HasValue) + { + Reads.TryAdd(key, version.Value); + } + } + + /// + /// Get record version for a given key. + /// + /// + /// + public long GetReadVersion(Key key) + { + return Reads[key]; + } + + /// + /// Process the results of a record write. For internal use only. + /// + /// + /// + /// + public void OnWrite(Key key, long? 
version, int resultCode) + { + // Write commands set namespace prior to sending the command, so there is + // no need to call it here when receiving the response. + if (version.HasValue) + { + Reads.TryAdd(key, version.Value); + } + else + { + if (resultCode == ResultCode.OK) + { + Reads.Remove(key, out _); + Writes.Add(key); + } + } + } + + public void SetRollAttempted() + { + if (rollAttempted) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, + "commit() or abort() may only be called once for a given MRT"); + } + rollAttempted = true; + } + + public void Clear() + { + Ns = null; + Deadline = 0; + Reads.Clear(); + Writes.Clear(); + } + } +} diff --git a/AerospikeClient/Policy/BatchPolicy.cs b/AerospikeClient/Policy/BatchPolicy.cs index 46b54fb7..0e040327 100644 --- a/AerospikeClient/Policy/BatchPolicy.cs +++ b/AerospikeClient/Policy/BatchPolicy.cs @@ -23,7 +23,7 @@ namespace Aerospike.Client /// /// Batch parent policy. /// - public sealed class BatchPolicy : Policy + public class BatchPolicy : Policy { /// /// Maximum number of concurrent synchronous batch node request threads to server nodes. diff --git a/AerospikeClient/Policy/ClientPolicy.cs b/AerospikeClient/Policy/ClientPolicy.cs index 9947761f..be3dca01 100644 --- a/AerospikeClient/Policy/ClientPolicy.cs +++ b/AerospikeClient/Policy/ClientPolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -225,7 +225,18 @@ public class ClientPolicy /// Default user defined function policy used in batch UDF excecute commands. /// public BatchUDFPolicy batchUDFPolicyDefault = new BatchUDFPolicy(); - + + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. 
+ /// + public TranVerifyPolicy tranVerifyPolicyDefault = new TranVerifyPolicy(); + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// + public TranRollPolicy tranRollPolicyDefault = new TranRollPolicy(); + /// /// Default info policy that is used when info command's policy is null. /// @@ -326,6 +337,8 @@ public ClientPolicy(ClientPolicy other) this.batchWritePolicyDefault = new BatchWritePolicy(other.batchWritePolicyDefault); this.batchDeletePolicyDefault = new BatchDeletePolicy(other.batchDeletePolicyDefault); this.batchUDFPolicyDefault = new BatchUDFPolicy(other.batchUDFPolicyDefault); + this.tranVerifyPolicyDefault = new TranVerifyPolicy(other.tranVerifyPolicyDefault); + this.tranRollPolicyDefault = new TranRollPolicy(other.tranRollPolicyDefault); this.infoPolicyDefault = new InfoPolicy(other.infoPolicyDefault); this.tlsPolicy = (other.tlsPolicy != null) ? new TlsPolicy(other.tlsPolicy) : null; this.ipMap = other.ipMap; diff --git a/AerospikeClient/Policy/Policy.cs b/AerospikeClient/Policy/Policy.cs index 4fbe4f93..757f2129 100644 --- a/AerospikeClient/Policy/Policy.cs +++ b/AerospikeClient/Policy/Policy.cs @@ -25,6 +25,14 @@ namespace Aerospike.Client /// public class Policy { + /// + /// Multi-record transaction identifier. + /// + /// Default: null + /// + /// + public Tran Tran { get; set; } + /// /// Read policy for AP (availability) namespaces. /// @@ -275,6 +283,7 @@ public class Policy /// public Policy(Policy other) { + this.Tran = other.Tran; this.readModeAP = other.readModeAP; this.readModeSC = other.readModeSC; this.replica = other.replica; diff --git a/AerospikeClient/Policy/TranRollPolicy.cs b/AerospikeClient/Policy/TranRollPolicy.cs new file mode 100644 index 00000000..c123c2fa --- /dev/null +++ b/AerospikeClient/Policy/TranRollPolicy.cs @@ -0,0 +1,45 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. 
+ * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) policy fields used to batch roll forward/backward records on + /// commit or abort. Used as a placeholder for now as there are no additional fields beyond BatchPolicy. + /// + public sealed class TranRollPolicy : BatchPolicy + { + /// + /// Copy policy from another policy. + /// + public TranRollPolicy(TranRollPolicy other) : + base(other) + { + } + + /// + /// Default constructor. + /// + public TranRollPolicy() + { + replica = Replica.MASTER; + maxRetries = 5; + totalTimeout = 10000; + sleepBetweenRetries = 1000; + } + } +} diff --git a/AerospikeClient/Policy/TranVerifyPolicy.cs b/AerospikeClient/Policy/TranVerifyPolicy.cs new file mode 100644 index 00000000..e641ba33 --- /dev/null +++ b/AerospikeClient/Policy/TranVerifyPolicy.cs @@ -0,0 +1,46 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License.
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) policy fields used to batch verify record versions on commit. + /// Used as a placeholder for now as there are no additional fields beyond BatchPolicy. + /// + public sealed class TranVerifyPolicy : BatchPolicy + { + /// + /// Copy policy from another policy. + /// + public TranVerifyPolicy(TranVerifyPolicy other) : + base(other) + { + } + + /// + /// Default constructor. + /// + public TranVerifyPolicy() + { + readModeSC = ReadModeSC.LINEARIZE; + replica = Replica.MASTER; + maxRetries = 5; + totalTimeout = 10000; + sleepBetweenRetries = 1000; + } + } +} diff --git a/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs b/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs index afa3028e..e19597df 100644 --- a/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs +++ b/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs @@ -100,6 +100,17 @@ public class AerospikeClientProxy : IDisposable, IAerospikeClient /// public BatchUDFPolicy batchUDFPolicyDefault { get; set; } + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// + public BatchPolicy tranVerifyPolicyDefault { get; set; } + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// + public BatchPolicy tranRollPolicyDefault { get; set; } + + /// + /// Default info policy that is used when info command policy is null.
/// @@ -150,6 +161,8 @@ public AerospikeClientProxy(ClientPolicy policy, params Host[] hosts) this.batchWritePolicyDefault = policy.batchWritePolicyDefault; this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; + this.tranVerifyPolicyDefault = policy.tranVerifyPolicyDefault; + this.tranRollPolicyDefault = policy.tranRollPolicyDefault; this.infoPolicyDefault = policy.infoPolicyDefault; this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); @@ -304,6 +317,25 @@ public BatchUDFPolicy BatchUDFPolicyDefault set { batchUDFPolicyDefault = value; } } + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// + public BatchPolicy TranVerifyPolicyDefault + { + get { return tranVerifyPolicyDefault; } + set { tranVerifyPolicyDefault = value; } + } + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// + public BatchPolicy TranRollPolicyDefault + { + get { return tranRollPolicyDefault; } + set { tranRollPolicyDefault = value; } + } + /// /// Default info policy that is used when info command policy is null. /// @@ -424,6 +456,36 @@ private string GetVersion() return response.Version; } + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Attempt to commit the given multi-record transaction. First, the expected record versions are + /// sent to the server nodes for verification.If all nodes return success, the transaction is + /// committed.Otherwise, the transaction is aborted. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// multi-record transaction + public void Commit(Tran tran) + { + + } + + /// + /// Abort and rollback the given multi-record transaction. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// multi-record transaction + public void Abort(Tran tran) + { + + } + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- @@ -979,7 +1041,7 @@ public Record Join(BatchPolicy policy, Key key, params Join[] joins) /// if command fails public Record Operate(WritePolicy policy, Key key, params Operation[] operations) { - OperateArgs args = new(policy, writePolicyDefault, operatePolicyReadDefault, key, operations); + OperateArgs args = new(policy, writePolicyDefault, operatePolicyReadDefault, operations); Buffer buffer = new(); OperateCommandProxy command = new(buffer, Channel, key, args); command.Execute(); diff --git a/AerospikeClientProxy/Proxy/AsyncClientProxy.cs b/AerospikeClientProxy/Proxy/AsyncClientProxy.cs index 2af97038..b9990631 100644 --- a/AerospikeClientProxy/Proxy/AsyncClientProxy.cs +++ b/AerospikeClientProxy/Proxy/AsyncClientProxy.cs @@ -111,6 +111,44 @@ public AsyncClientProxy(AsyncClientPolicy policy, params Host[] hosts) { } + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Asynchronously attempt to commit the given multi-record transaction. First, the expected + /// record versions are sent to the server nodes for verification.If all nodes return success, + /// the transaction is committed.Otherwise, the transaction is aborted. + ///

+ /// This method registers the command with an event loop and returns. + /// The event loop thread will process the command and send the results to the listener. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// where to send results + /// multi-record transaction + public void Commit(CommitListener listener, Tran tran) + { + + } + + /// + /// Asynchronously abort and rollback the given multi-record transaction. + ///

+ /// This method registers the command with an event loop and returns. + /// The event loop thread will process the command and send the results to the listener. + ///

+ /// Requires server version 8.0+ + ///

+ ///
+ /// where to send results + /// multi-record transaction + public void Abort(AbortListener listener, Tran tran) + { + + } + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- @@ -993,7 +1031,7 @@ public void GetHeader(BatchPolicy policy, RecordSequenceListener listener, Key[] /// if queue is full public async Task Operate(WritePolicy policy, CancellationToken token, Key key, params Operation[] ops) { - OperateArgs args = new(policy, writePolicyDefault, operatePolicyReadDefault, key, ops); + OperateArgs args = new(policy, writePolicyDefault, operatePolicyReadDefault, ops); Buffer buffer = new(); OperateCommandProxy command = new(buffer, Channel, key, args); return await command.Execute(token); diff --git a/AerospikeClientProxy/Proxy/BatchProxy.cs b/AerospikeClientProxy/Proxy/BatchProxy.cs index 63059f10..726efc91 100644 --- a/AerospikeClientProxy/Proxy/BatchProxy.cs +++ b/AerospikeClientProxy/Proxy/BatchProxy.cs @@ -44,7 +44,7 @@ BatchStatus status protected internal override void WriteBuffer() { - SetBatchOperate(BatchPolicy, (IList)Records, Batch); + SetBatchOperate(BatchPolicy, null, null, null, (IList)Records, Batch); } protected internal override bool ParseRow() @@ -141,7 +141,7 @@ BatchStatus status protected internal override void WriteBuffer() { - SetBatchOperate(BatchPolicy, Records.ToArray(), Batch); + SetBatchOperate(BatchPolicy, null, null, null, Records.ToArray(), Batch); } protected internal override bool ParseRow() @@ -192,7 +192,7 @@ protected internal override bool IsWrite() protected internal override void WriteBuffer() { - SetBatchOperate(BatchPolicy, (IList)Records, Batch); + SetBatchOperate(BatchPolicy, null, null, null, (IList)Records, Batch); } protected internal override bool ParseRow() diff --git a/AerospikeClientProxy/Proxy/GRPCCommand.cs b/AerospikeClientProxy/Proxy/GRPCCommand.cs index 902fab53..67d2126d 100644 --- 
a/AerospikeClientProxy/Proxy/GRPCCommand.cs +++ b/AerospikeClientProxy/Proxy/GRPCCommand.cs @@ -468,7 +468,7 @@ public override void SetExists(Policy policy, Key key) End(); } - public override void SetRead(Policy policy, Key key) + public void SetRead(Policy policy, Key key) { Begin(); int fieldCount = EstimateKeySize(policy, key); @@ -884,7 +884,8 @@ int readAttr // Batch Read/Write Operations //-------------------------------------------------- - public override void SetBatchOperate(BatchPolicy policy, IList records, BatchNode batch) + public override void SetBatchOperate(BatchPolicy policy, BatchWritePolicy writePolicy, + BatchUDFPolicy udfPolicy, BatchDeletePolicy deletePolicy, IList records, BatchNode batch) { // Estimate full row size int[] offsets = batch.offsets; @@ -1535,7 +1536,7 @@ ulong taskId WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); // Write taskId field - WriteField(taskId, FieldType.TRAN_ID); + WriteField(taskId, FieldType.QUERY_ID); if (binNames != null) { @@ -1745,7 +1746,7 @@ bool background WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); // Write taskId field - WriteField(taskId, FieldType.TRAN_ID); + WriteField(taskId, FieldType.QUERY_ID); if (statement.filter != null) { From 8080cb140bc4d8840449240ce1de6b37bfe43394 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Tue, 6 Aug 2024 12:36:20 -0600 Subject: [PATCH 02/41] Batch working now --- AerospikeClient/Async/AsyncBatch.cs | 8 +- AerospikeClient/Command/Batch.cs | 4 +- AerospikeClient/Command/Command.cs | 195 +++++---- AerospikeClient/Main/AerospikeClient.cs | 22 +- AerospikeClient/Policy/Policy.cs | 1 + AerospikeClientProxy/Proxy/BatchProxy.cs | 9 +- AerospikeClientProxy/Proxy/GRPCCommand.cs | 3 +- AerospikeTest/Sync/Basic/TestTran.cs | 492 ++++++++++++++++++++++ 8 files changed, 634 insertions(+), 100 deletions(-) create mode 100644 AerospikeTest/Sync/Basic/TestTran.cs diff --git a/AerospikeClient/Async/AsyncBatch.cs b/AerospikeClient/Async/AsyncBatch.cs 
index af0561af..efa00f13 100644 --- a/AerospikeClient/Async/AsyncBatch.cs +++ b/AerospikeClient/Async/AsyncBatch.cs @@ -86,7 +86,7 @@ protected internal override void WriteBuffer() { if (batch.node.HasBatchAny) { - SetBatchOperate(batchPolicy, null, null, null, records, batch); + SetBatchOperate(batchPolicy, records, batch); } else { @@ -197,7 +197,7 @@ protected internal override void WriteBuffer() { if (batch.node.HasBatchAny) { - SetBatchOperate(batchPolicy, null, null, null, records, batch); + SetBatchOperate(batchPolicy, records, batch); } else { @@ -790,7 +790,7 @@ protected internal override bool IsWrite() protected internal override void WriteBuffer() { - SetBatchOperate(batchPolicy, null, null, null, records, batch); + SetBatchOperate(batchPolicy, records, batch); } protected internal override void ParseRow() @@ -935,7 +935,7 @@ protected internal override bool IsWrite() protected internal override void WriteBuffer() { - SetBatchOperate(batchPolicy, null, null, null, records, batch); + SetBatchOperate(batchPolicy, records, batch); } protected internal override void ParseRow() diff --git a/AerospikeClient/Command/Batch.cs b/AerospikeClient/Command/Batch.cs index fee63f92..79b4b39c 100644 --- a/AerospikeClient/Command/Batch.cs +++ b/AerospikeClient/Command/Batch.cs @@ -42,7 +42,7 @@ protected internal override void WriteBuffer() { if (batch.node != null && batch.node.HasBatchAny) { - SetBatchOperate(batchPolicy, null, null, null, records, batch); + SetBatchOperate(batchPolicy, records, batch); } else { @@ -236,7 +236,7 @@ protected internal override bool IsWrite() protected internal override void WriteBuffer() { - SetBatchOperate(batchPolicy, null, null, null, (IList)records, batch); + SetBatchOperate(batchPolicy, (IList)records, batch); } protected internal override bool ParseRow() diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs index 2b50cfe5..fcba2a1e 100644 --- a/AerospikeClient/Command/Command.cs +++ 
b/AerospikeClient/Command/Command.cs @@ -137,10 +137,12 @@ public void SetTranVerify(Tran tran, Key key, long ver) int fieldCount = EstimateKeySize(key); // Version field. - dataOffset += 7; + dataOffset += 7 + FIELD_HEADER_SIZE; fieldCount++; + //bool compress = SizeBuffer(policy); TODO SizeBuffer(); + dataOffset += 8; dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); dataBuffer[dataOffset++] = (byte)0; @@ -215,14 +217,15 @@ BatchOffsets offsets } } - SizeBuffer(); + bool compress = SizeBuffer(policy); WriteBatchHeader(policy, totalTimeout, 1); int fieldSizeOffset = dataOffset; WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset+= 4; dataBuffer[dataOffset++] = GetBatchFlags(policy); prev = null; @@ -232,7 +235,8 @@ BatchOffsets offsets Key key = keys[offset]; long? ver = versions[offset]; - dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; byte[] digest = key.digest; Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); @@ -272,7 +276,7 @@ BatchOffsets offsets // Write real field size. 
ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); - End(policy.compress); + End(compress); } public void SetTranMarkRollForward(Tran tran, Key key) @@ -295,6 +299,7 @@ public void SetTranRoll(Key key, Tran tran, int tranAttr) fieldCount += SizeTran(key, tran, false); SizeBuffer(); + dataOffset += 8; dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; dataBuffer[dataOffset++] = (byte)0; dataBuffer[dataOffset++] = (byte)(Command.INFO2_WRITE | Command.INFO2_DURABLE_DELETE); @@ -374,7 +379,7 @@ BatchOffsets offsets } } - SizeBuffer(); + bool compress = SizeBuffer(policy); WriteBatchHeader(policy, totalTimeout, fieldCount); @@ -392,7 +397,8 @@ BatchOffsets offsets Key key = keys[offset]; long ver = versions[i]; - dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; byte[] digest = key.digest; Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); @@ -413,7 +419,7 @@ BatchOffsets offsets // Write real field size. 
ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); - End(policy.compress); + End(compress); } public void SetTranClose(Tran tran, Key key) @@ -428,7 +434,7 @@ public void SetTranClose(Tran tran, Key key) private void WriteTranMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) { SizeBuffer(); - + dataOffset += 8; dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; dataBuffer[dataOffset++] = (byte)readAttr; dataBuffer[dataOffset++] = (byte)writeAttr; @@ -464,7 +470,7 @@ public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key k EstimateOperationSize(bin); } - bool compress = SizeBuffer(policy); + bool compress = SizeBuffer(policy); // TODO this is different from java WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.Length); WriteKey(policy, key, true); @@ -750,14 +756,14 @@ public void SetOperate(Policy policy, BatchAttr attr, Key key, Operation[] ops) int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); dataOffset += attr.opSize; - SizeBuffer(); + bool compress = SizeBuffer(policy); WriteKeyAttr(policy, key, attr, exp, fieldCount, ops.Length); foreach (Operation op in ops) { WriteOperation(op); } - End(policy.compress); + End(compress); } @@ -806,12 +812,12 @@ public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, s int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); fieldCount += EstimateUdfSize(packageName, functionName, argBytes); - SizeBuffer(); + bool compress = SizeBuffer(policy); WriteKeyAttr(policy, key, attr, exp, fieldCount, 0); WriteField(packageName, FieldType.UDF_PACKAGE_NAME); WriteField(functionName, FieldType.UDF_FUNCTION); WriteField(argBytes, FieldType.UDF_ARGLIST); - End(policy.compress); + End(compress); } //-------------------------------------------------- @@ -896,7 +902,8 @@ public virtual void SetBatchRead(BatchPolicy policy, List records, Ba int fieldSizeOffset = dataOffset; WriteFieldHeader(0, 
FieldType.BATCH_INDEX); // Need to update size at end - dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; prev = null; @@ -1037,14 +1044,16 @@ int readAttr int fieldSizeOffset = dataOffset; WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; prev = null; for (int i = 0; i < max; i++) { int index = offsets[i]; - dataOffset += ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); + dataOffset += 4; Key key = keys[index]; byte[] digest = key.digest; @@ -1067,7 +1076,7 @@ int readAttr dataBuffer[dataOffset++] = (byte)readAttr; WriteBatchFields(key, 0, binNames.Length); - foreach (string binName in binNames) + foreach (String binName in binNames) { WriteOperation(binName, Operation.Type.READ); } @@ -1098,24 +1107,17 @@ int readAttr public virtual void SetBatchOperate( BatchPolicy policy, - BatchWritePolicy writePolicy, - BatchUDFPolicy udfPolicy, - BatchDeletePolicy deletePolicy, IList records, BatchNode batch) { BatchOffsetsNative offsets = new BatchOffsetsNative(batch); - SetBatchOperate(policy, writePolicy, udfPolicy, deletePolicy, records, offsets); + SetBatchOperate(policy, records, offsets); } public void SetBatchOperate( BatchPolicy policy, - BatchWritePolicy writePolicy, - BatchUDFPolicy udfPolicy, - BatchDeletePolicy deletePolicy, IList records, - BatchOffsets offsets - ) + BatchOffsets offsets) { Begin(); int max = offsets.Size(); @@ -1171,16 +1173,17 @@ BatchOffsets offsets prev = record; } } - SizeBuffer(); + bool compress = SizeBuffer(policy); WriteBatchHeader(policy, totalTimeout, fieldCount); 
policy.filterExp?.Write(this); int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = GetBatchFlags(policy); BatchAttr attr = new(); @@ -1190,15 +1193,16 @@ BatchOffsets offsets { int offset = offsets.Get(i); BatchRecord record = (BatchRecord)records[offset]; - long? ver = versions?[i]; - - dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + long? ver = (versions != null) ? versions[i] : null; + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; Key key = record.key; byte[] digest = key.digest; Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); dataOffset += digest.Length; + if (CanRepeat(policy, key, record, prev, ver)) { // Can set repeat previous namespace/bin names to save space. @@ -1224,11 +1228,11 @@ BatchOffsets offsets if (br.binNames != null) { - if (br.binNames.Length > 0) + if (br.binNames.Length > 0) { - WriteBatchBinNames(key, tran, ver, br.binNames, attr, attr.filterExp); + WriteBatchBinNames(key, tran, ver, br.binNames, attr, attr.filterExp); } - else + else { attr.AdjustRead(true); WriteBatchRead(key, tran, ver, attr, attr.filterExp, 0); @@ -1250,9 +1254,15 @@ BatchOffsets offsets case BatchRecord.Type.BATCH_WRITE: { BatchWrite bw = (BatchWrite)record; - BatchWritePolicy bwp = (bw.policy != null) ? 
bw.policy : writePolicy; - attr.SetWrite(bwp); + if (bw.policy != null) + { + attr.SetWrite(bw.policy); + } + else + { + attr.SetWrite(policy); + } attr.AdjustWrite(bw.ops); WriteBatchOperations(key, tran, ver, bw.ops, attr, attr.filterExp); break; @@ -1261,10 +1271,16 @@ BatchOffsets offsets case BatchRecord.Type.BATCH_UDF: { BatchUDF bu = (BatchUDF)record; - BatchUDFPolicy bup = (bu.policy != null) ? bu.policy : udfPolicy; - attr.SetUDF(bup); - WriteBatchWrite(key, tran, ver, attr, attr.filterExp, 3, 0); + if (bu.policy != null) + { + attr.SetUDF(bu.policy); + } + else + { + attr.SetUDF(policy); + } + WriteBatchWrite(key, policy.Tran, null, attr, attr.filterExp, 3, 0); WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME); WriteField(bu.functionName, FieldType.UDF_FUNCTION); WriteField(bu.argBytes, FieldType.UDF_ARGLIST); @@ -1274,9 +1290,15 @@ BatchOffsets offsets case BatchRecord.Type.BATCH_DELETE: { BatchDelete bd = (BatchDelete)record; - BatchDeletePolicy bdp = (bd.policy != null) ? bd.policy : deletePolicy; - attr.SetDelete(bdp); + if (bd.policy != null) + { + attr.SetDelete(bd.policy); + } + else + { + attr.SetDelete(policy); + } WriteBatchWrite(key, tran, ver, attr, attr.filterExp, 0, 0); break; } @@ -1286,8 +1308,8 @@ BatchOffsets offsets } // Write real field size. 
- ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); - End(policy.compress); + ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); + End(compress); } public virtual void SetBatchOperate @@ -1401,7 +1423,7 @@ BatchOffsets offsets } } - SizeBuffer(); + bool compress = SizeBuffer(policy); WriteBatchHeader(policy, totalTimeout, fieldCount); @@ -1410,7 +1432,8 @@ BatchOffsets offsets int fieldSizeOffset = dataOffset; WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = GetBatchFlags(policy); prev = null; @@ -1420,7 +1443,8 @@ BatchOffsets offsets Key key = keys[offset]; long? ver = versions?[i]; - dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; byte[] digest = key.digest; Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); @@ -1456,7 +1480,7 @@ BatchOffsets offsets // Write real field size. ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); - End(policy.compress); + End(compress); } public virtual void SetBatchUDF( @@ -1546,7 +1570,7 @@ BatchOffsets offsets } } - SizeBuffer(); + bool compress = SizeBuffer(policy); WriteBatchHeader(policy, totalTimeout, fieldCount); @@ -1555,7 +1579,8 @@ BatchOffsets offsets int fieldSizeOffset = dataOffset; WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - dataOffset += ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = GetBatchFlags(policy); prev = null; @@ -1565,7 +1590,8 @@ BatchOffsets offsets Key key = keys[offset]; long? 
ver = versions?[i]; - dataOffset += ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; byte[] digest = key.digest; Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); @@ -1589,7 +1615,7 @@ BatchOffsets offsets // Write real field size. ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); - End(policy.compress); + End(compress); } private static bool CanRepeat( @@ -1677,13 +1703,9 @@ private void WriteBatchHeader(Policy policy, int timeout, int fieldCount) dataOffset += 8; dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. dataBuffer[dataOffset++] = (byte)readAttr; - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)0; - for (int i = 0; i < 10; i++) - { - dataBuffer[dataOffset++] = 0; - } + Array.Clear(dataBuffer, dataOffset, 12); + dataOffset += 12; dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); @@ -1718,24 +1740,27 @@ private void WriteBatchOperations(Key key, Tran tran, long? ver, Operation[] ops private void WriteBatchRead(Key key, Tran tran, long? 
ver, BatchAttr attr, Expression filter, int opCount) { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL); dataBuffer[dataOffset++] = (byte)attr.readAttr; dataBuffer[dataOffset++] = (byte)attr.writeAttr; dataBuffer[dataOffset++] = (byte)attr.infoAttr; dataBuffer[dataOffset++] = (byte)attr.tranAttr; - dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; WriteBatchFields(key, tran, ver, attr,filter, 0, opCount); } private void WriteBatchWrite(Key key, Tran tran, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL); dataBuffer[dataOffset++] = (byte)attr.readAttr; dataBuffer[dataOffset++] = (byte)attr.writeAttr; dataBuffer[dataOffset++] = (byte)attr.infoAttr; dataBuffer[dataOffset++] = (byte)attr.tranAttr; - dataOffset += ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); + dataOffset += 2; + ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; WriteBatchFields(key, tran, ver, attr, filter, fieldCount, opCount); } @@ -1794,8 +1819,10 @@ private void WriteBatchFields(Key key, Tran tran, long? 
ver, BatchAttr attr, Exp private void WriteBatchFields(Key key, int fieldCount, int opCount) { fieldCount += 2; - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset); + ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += 2; + ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset); + dataOffset += 2; WriteField(key.ns, FieldType.NAMESPACE); WriteField(key.setName, FieldType.TABLE); } @@ -1907,7 +1934,8 @@ NodePartitions nodePartitions foreach (PartitionStatus part in nodePartitions.partsFull) { - dataOffset += ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); + ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); + dataOffset += 2; } } @@ -2259,7 +2287,8 @@ NodePartitions nodePartitions foreach (PartitionStatus part in nodePartitions.partsFull) { - dataOffset += ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); + ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); + dataOffset += 2; } } @@ -2280,7 +2309,8 @@ NodePartitions nodePartitions foreach (PartitionStatus part in nodePartitions.partsPartial) { - dataOffset += ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset); + ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset); + dataOffset += 8; } } @@ -2771,7 +2801,8 @@ private void WriteOperation(Bin bin, Operation.Type operationType) int nameLength = ByteUtil.StringToUtf8(bin.name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); int valueLength = bin.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); - dataOffset += ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); dataBuffer[dataOffset++] = (byte) 
bin.value.Type; dataBuffer[dataOffset++] = (byte) 0; @@ -2784,7 +2815,8 @@ private void WriteOperation(Operation operation) int nameLength = ByteUtil.StringToUtf8(operation.binName, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); int valueLength = operation.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); - dataOffset += ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = Operation.GetProtocolType(operation.type); dataBuffer[dataOffset++] = (byte) operation.value.Type; dataBuffer[dataOffset++] = (byte) 0; @@ -2796,7 +2828,8 @@ private void WriteOperation(string name, Operation.Type operationType) { int nameLength = ByteUtil.StringToUtf8(name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); - dataOffset += ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); dataBuffer[dataOffset++] = (byte) 0; dataBuffer[dataOffset++] = (byte) 0; @@ -2806,7 +2839,8 @@ private void WriteOperation(string name, Operation.Type operationType) private void WriteOperation(Operation.Type operationType) { - dataOffset += ByteUtil.IntToBytes(4, dataBuffer, dataOffset); + ByteUtil.IntToBytes(4, dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); dataBuffer[dataOffset++] = 0; dataBuffer[dataOffset++] = 0; @@ -2890,30 +2924,35 @@ private void WriteField(byte[] bytes, int type) private void WriteField(int val, int type) { WriteFieldHeader(4, type); - dataOffset += ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset); + dataOffset += 4; } private void WriteFieldLE(int val, int type) { WriteFieldHeader(4, type); - 
dataOffset += ByteUtil.IntToLittleBytes((uint)val, dataBuffer, dataOffset); + ByteUtil.IntToLittleBytes((uint)val, dataBuffer, dataOffset); + dataOffset += 4; } private void WriteField(ulong val, int type) { WriteFieldHeader(8, type); - dataOffset += ByteUtil.LongToBytes(val, dataBuffer, dataOffset); + ByteUtil.LongToBytes(val, dataBuffer, dataOffset); + dataOffset += 8; } private void WriteFieldLE(long val, int type) { WriteFieldHeader(8, type); - dataOffset += ByteUtil.LongToLittleBytes((ulong)val, dataBuffer, dataOffset); + ByteUtil.LongToLittleBytes((ulong)val, dataBuffer, dataOffset); + dataOffset += 8; } private void WriteFieldHeader(int size, int type) { - dataOffset += ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset); + ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset); + dataOffset += 4; dataBuffer[dataOffset++] = (byte)type; } diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index 852ebb87..94d3200d 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -258,7 +258,7 @@ protected internal AerospikeClient(ClientPolicy policy) /// public Policy ReadPolicyDefault { - get { return readPolicyDefault; } + get { return new Policy(readPolicyDefault); } set { readPolicyDefault = value; } } @@ -267,7 +267,7 @@ public Policy ReadPolicyDefault /// public WritePolicy WritePolicyDefault { - get { return writePolicyDefault; } + get { return new WritePolicy(writePolicyDefault); } set { writePolicyDefault = value; } } @@ -276,7 +276,7 @@ public WritePolicy WritePolicyDefault /// public ScanPolicy ScanPolicyDefault { - get { return scanPolicyDefault; } + get { return new ScanPolicy(scanPolicyDefault); } set { scanPolicyDefault = value; } } @@ -285,7 +285,7 @@ public ScanPolicy ScanPolicyDefault /// public QueryPolicy QueryPolicyDefault { - get { return queryPolicyDefault; } + get { return new QueryPolicy(queryPolicyDefault); } set { queryPolicyDefault = value; 
} } @@ -295,7 +295,7 @@ public QueryPolicy QueryPolicyDefault /// public BatchPolicy BatchPolicyDefault { - get { return batchPolicyDefault; } + get { return new BatchPolicy(batchPolicyDefault); } set { batchPolicyDefault = value; } } @@ -305,7 +305,7 @@ public BatchPolicy BatchPolicyDefault /// public BatchPolicy BatchParentPolicyWriteDefault { - get { return batchParentPolicyWriteDefault; } + get { return new BatchPolicy(batchParentPolicyWriteDefault); } set { batchParentPolicyWriteDefault = value; } } @@ -315,7 +315,7 @@ public BatchPolicy BatchParentPolicyWriteDefault /// public BatchWritePolicy BatchWritePolicyDefault { - get { return batchWritePolicyDefault; } + get { return new BatchWritePolicy(batchWritePolicyDefault); } set { batchWritePolicyDefault = value; } } @@ -324,7 +324,7 @@ public BatchWritePolicy BatchWritePolicyDefault /// public BatchDeletePolicy BatchDeletePolicyDefault { - get { return batchDeletePolicyDefault; } + get { return new BatchDeletePolicy(batchDeletePolicyDefault); } set { batchDeletePolicyDefault = value; } } @@ -333,7 +333,7 @@ public BatchDeletePolicy BatchDeletePolicyDefault /// public BatchUDFPolicy BatchUDFPolicyDefault { - get { return batchUDFPolicyDefault; } + get { return new BatchUDFPolicy(batchUDFPolicyDefault); } set { batchUDFPolicyDefault = value; } } @@ -342,7 +342,7 @@ public BatchUDFPolicy BatchUDFPolicyDefault /// public BatchPolicy TranVerifyPolicyDefault { - get { return tranVerifyPolicyDefault; } + get { return new BatchPolicy(tranVerifyPolicyDefault); } set { tranVerifyPolicyDefault = value; } } @@ -352,7 +352,7 @@ public BatchPolicy TranVerifyPolicyDefault /// public BatchPolicy TranRollPolicyDefault { - get { return tranRollPolicyDefault; } + get { return new BatchPolicy(tranRollPolicyDefault); } set { tranRollPolicyDefault = value; } } diff --git a/AerospikeClient/Policy/Policy.cs b/AerospikeClient/Policy/Policy.cs index 757f2129..aa431247 100644 --- a/AerospikeClient/Policy/Policy.cs +++ 
b/AerospikeClient/Policy/Policy.cs @@ -305,6 +305,7 @@ public Policy(Policy other) /// public Policy() { + Tran = null; } /// diff --git a/AerospikeClientProxy/Proxy/BatchProxy.cs b/AerospikeClientProxy/Proxy/BatchProxy.cs index 726efc91..98298942 100644 --- a/AerospikeClientProxy/Proxy/BatchProxy.cs +++ b/AerospikeClientProxy/Proxy/BatchProxy.cs @@ -44,7 +44,8 @@ BatchStatus status protected internal override void WriteBuffer() { - SetBatchOperate(BatchPolicy, null, null, null, (IList)Records, Batch); + //SetBatchOperate(BatchPolicy, null, null, null, (IList)Records, Batch); + SetBatchOperate(BatchPolicy, (IList)Records, Batch); } protected internal override bool ParseRow() @@ -141,7 +142,8 @@ BatchStatus status protected internal override void WriteBuffer() { - SetBatchOperate(BatchPolicy, null, null, null, Records.ToArray(), Batch); + //SetBatchOperate(BatchPolicy, null, null, null, Records.ToArray(), Batch); + SetBatchOperate(BatchPolicy, Records.ToArray(), Batch); } protected internal override bool ParseRow() @@ -192,7 +194,8 @@ protected internal override bool IsWrite() protected internal override void WriteBuffer() { - SetBatchOperate(BatchPolicy, null, null, null, (IList)Records, Batch); + //SetBatchOperate(BatchPolicy, null, null, null, (IList)Records, Batch); + SetBatchOperate(BatchPolicy, (IList)Records, Batch); } protected internal override bool ParseRow() diff --git a/AerospikeClientProxy/Proxy/GRPCCommand.cs b/AerospikeClientProxy/Proxy/GRPCCommand.cs index 67d2126d..c3de8feb 100644 --- a/AerospikeClientProxy/Proxy/GRPCCommand.cs +++ b/AerospikeClientProxy/Proxy/GRPCCommand.cs @@ -884,8 +884,7 @@ int readAttr // Batch Read/Write Operations //-------------------------------------------------- - public override void SetBatchOperate(BatchPolicy policy, BatchWritePolicy writePolicy, - BatchUDFPolicy udfPolicy, BatchDeletePolicy deletePolicy, IList records, BatchNode batch) + public override void SetBatchOperate(BatchPolicy policy, IList records, BatchNode 
batch) { // Estimate full row size int[] offsets = batch.offsets; diff --git a/AerospikeTest/Sync/Basic/TestTran.cs b/AerospikeTest/Sync/Basic/TestTran.cs new file mode 100644 index 00000000..ab30ae82 --- /dev/null +++ b/AerospikeTest/Sync/Basic/TestTran.cs @@ -0,0 +1,492 @@ +/* + * Copyright 2012-2018 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Aerospike.Client; +using System.Reflection; +using System.Text; + +namespace Aerospike.Test +{ + [TestClass] + public class TestTran : TestSync + { + private static readonly string binName = "bin"; + + [ClassInitialize()] + public static void Prepare(TestContext testContext) + { + if (!args.testProxy || (args.testProxy && nativeClient != null)) + { + Assembly assembly = Assembly.GetExecutingAssembly(); + RegisterTask task = nativeClient.Register(null, assembly, "Aerospike.Test.LuaResources.record_example.lua", "record_example.lua", Language.LUA); + task.Wait(); + } + } + + [TestMethod] + public void TranWrite() + { + Key key = new Key(args.ns, args.set, "mrtkey1"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + client.Put(wp, key, new Bin(binName, "val2")); + + client.Commit(tran); + + Record record = client.Get(null, key); + 
AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TranWriteTwice() + { + Key key = new Key(args.ns, args.set, "mrtkey2"); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + client.Put(wp, key, new Bin(binName, "val1")); + client.Put(wp, key, new Bin(binName, "val2")); + + client.Commit(tran); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void tranWriteConflict() + { + Key key = new Key(args.ns, args.set, "mrtkey21"); + + Tran tran1 = new Tran(); + Tran tran2 = new Tran(); + + WritePolicy wp1 = client.WritePolicyDefault; + WritePolicy wp2 = client.WritePolicyDefault; + wp1.Tran = tran1; + wp2.Tran = tran2; + + client.Put(wp1, key, new Bin(binName, "val1")); + + try + { + client.Put(wp2, key, new Bin(binName, "val2")); + } + catch (AerospikeException ae) + { + if (ae.Result != ResultCode.MRT_BLOCKED) + { + throw ae; + } + } + + client.Commit(tran1); + client.Commit(tran2); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TranWriteBlock() + { + Key key = new Key(args.ns, args.set, "mrtkey3"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + client.Put(wp, key, new Bin(binName, "val2")); + + try + { + // This write should be blocked. 
+ client.Put(null, key, new Bin(binName, "val3")); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException e) + { + if (e.Result != ResultCode.MRT_BLOCKED) + { + throw e; + } + } + + client.Commit(tran); + } + + [TestMethod] + public void TranWriteRead() + { + Key key = new Key(args.ns, args.set, "mrtkey4"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + client.Put(wp, key, new Bin(binName, "val2")); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + + client.Commit(tran); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TranWriteAbort() + { + Key key = new Key(args.ns, args.set, "mrtkey5"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + client.Put(wp, key, new Bin(binName, "val2")); + + Policy p = client.ReadPolicyDefault; + p.Tran = tran; + Record record = client.Get(p, key); + AssertBinEqual(key, record, binName, "val2"); + + client.Abort(tran); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TranDelete() + { + Key key = new Key(args.ns, args.set, "mrtkey6"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + wp.durableDelete = true; + client.Delete(wp, key); + + client.Commit(tran); + + Record record = client.Get(null, key); + Assert.IsNull(record); + } + + [TestMethod] + public void TranDeleteAbort() + { + Key key = new Key(args.ns, args.set, "mrtkey7"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + wp.durableDelete = true; + client.Delete(wp, key); + + 
client.Abort(tran); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TranDeleteTwice() + { + Key key = new Key(args.ns, args.set, "mrtkey8"); + + Tran tran = new Tran(); + + client.Put(null, key, new Bin(binName, "val1")); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + wp.durableDelete = true; + client.Delete(wp, key); + client.Delete(wp, key); + + client.Commit(tran); + + Record record = client.Get(null, key); + Assert.IsNull(record); + } + + [TestMethod] + public void TranTouch() + { + Key key = new Key(args.ns, args.set, "mrtkey9"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + client.Touch(wp, key); + + client.Commit(tran); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TranTouchAbort() + { + Key key = new Key(args.ns, args.set, "mrtkey10"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + client.Touch(wp, key); + + client.Abort(tran); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TranOperateWrite() + { + Key key = new Key(args.ns, args.set, "mrtkey11"); + + client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + Record record = client.Operate(wp, key, + Operation.Put(new Bin(binName, "val2")), + Operation.Get("bin2") + ); + AssertBinEqual(key, record, "bin2", "bal1"); + + client.Commit(tran); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TranOperateWriteAbort() + { + Key key = new Key(args.ns, args.set, "mrtkey12"); + + 
client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + Record record = client.Operate(wp, key, + Operation.Put(new Bin(binName, "val2")), + Operation.Get("bin2") + ); + AssertBinEqual(key, record, "bin2", "bal1"); + + client.Abort(tran); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TranUDF() + { + Key key = new Key(args.ns, args.set, "mrtkey13"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + + client.Commit(tran); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TranUDFAbort() + { + Key key = new Key(args.ns, args.set, "mrtkey14"); + + client.Put(null, key, new Bin(binName, "val1")); + + Tran tran = new Tran(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Tran = tran; + client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + + client.Abort(tran); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TranBatch() + { + Key[] keys = new Key[10]; + Bin bin = new Bin(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new Key(args.ns, args.set, i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Record[] recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + + Tran tran = new Tran(); + + bin = new Bin(binName, 2); + + BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.Tran = tran; + + BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + + if (!bresults.status) + { + StringBuilder sb = new StringBuilder(); + sb.Append("Batch 
failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in bresults.records) + { + if (br.resultCode == 0) + { + sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + + throw new AerospikeException(sb.ToString()); + } + + client.Commit(tran); + + recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 2); + } + + [TestMethod] + public void TranBatchAbort() + { + Key[] keys = new Key[10]; + Bin bin = new Bin(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new Key(args.ns, args.set, i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Record[] recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + + Tran tran = new Tran(); + + bin = new Bin(binName, 2); + + BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.Tran = tran; + + BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + + if (!bresults.status) + { + StringBuilder sb = new StringBuilder(); + sb.Append("Batch failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in bresults.records) + { + if (br.resultCode == 0) + { + sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + + throw new AerospikeException(sb.ToString()); + } + + client.Abort(tran); + + recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + } + + private void AssertBatchEqual(Key[] keys, Record[] recs, int expected) + { + for (int i = 0; i < keys.Length; i++) + { + Key key = keys[i]; + Record rec = recs[i]; + + Assert.IsNotNull(rec); + + int received = rec.GetInt(binName); + Assert.AreEqual(expected, received); + } + } + } +} From e41b23c1a1efa33e7355ae359e101f184678d017 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Thu, 22 Aug 2024 15:06:17 -0600 Subject: [PATCH 03/41] First pass at async code, async tests still in progress --- 
AerospikeClient/Admin/Role.cs | 8 +- AerospikeClient/Async/AsyncBatch.cs | 317 +++++++++-- AerospikeClient/Async/AsyncClient.cs | 174 +++++-- AerospikeClient/Async/AsyncCommand.cs | 12 + AerospikeClient/Async/AsyncDelete.cs | 52 +- AerospikeClient/Async/AsyncExecute.cs | 77 ++- AerospikeClient/Async/AsyncExists.cs | 39 +- .../AsyncOperateRead.cs} | 28 +- AerospikeClient/Async/AsyncOperateWrite.cs | 89 ++++ .../Async/AsyncQueryPartitionExecutor.cs | 2 +- AerospikeClient/Async/AsyncRead.cs | 113 +--- AerospikeClient/Async/AsyncReadBase.cs | 65 +++ AerospikeClient/Async/AsyncReadHeader.cs | 44 +- .../Async/AsyncScanPartitionExecutor.cs | 2 +- AerospikeClient/Async/AsyncSingleCommand.cs | 90 +++- AerospikeClient/Async/AsyncTouch.cs | 49 +- .../{AsyncOperate.cs => AsyncTxnAddKeys.cs} | 63 ++- AerospikeClient/Async/AsyncTxnClose.cs | 88 ++++ .../Async/AsyncTxnMarkRollForward.cs | 88 ++++ AerospikeClient/Async/AsyncTxnMonitor.cs | 207 ++++++++ AerospikeClient/Async/AsyncTxnRoll.cs | 430 +++++++++++++++ AerospikeClient/Async/AsyncWrite.cs | 51 +- AerospikeClient/Async/AsyncWriteBase.cs | 80 +++ AerospikeClient/Async/IAsyncClient.cs | 24 +- AerospikeClient/Cluster/Cluster.cs | 25 +- AerospikeClient/Cluster/ClusterStats.cs | 10 +- AerospikeClient/Cluster/Node.cs | 12 +- AerospikeClient/Command/Batch.cs | 50 +- AerospikeClient/Command/BatchAttr.cs | 2 +- AerospikeClient/Command/BatchExecutor.cs | 2 +- AerospikeClient/Command/Command.cs | 54 +- AerospikeClient/Command/DeleteCommand.cs | 41 +- AerospikeClient/Command/ExecuteCommand.cs | 81 ++- AerospikeClient/Command/ExistsCommand.cs | 34 +- AerospikeClient/Command/MultiCommand.cs | 5 - AerospikeClient/Command/OperateCommand.cs | 73 --- AerospikeClient/Command/OperateCommandRead.cs | 35 ++ .../Command/OperateCommandWrite.cs | 65 +++ AerospikeClient/Command/ReadCommand.cs | 140 +---- AerospikeClient/Command/ReadHeaderCommand.cs | 36 +- AerospikeClient/Command/ScanExecutor.cs | 2 +- AerospikeClient/Command/SyncCommand.cs | 134 
+++++ AerospikeClient/Command/SyncReadCommand.cs | 53 ++ AerospikeClient/Command/SyncWriteCommand.cs | 104 +--- AerospikeClient/Command/TouchCommand.cs | 42 +- AerospikeClient/Command/TranAddKeys.cs | 107 ---- AerospikeClient/Command/TxnAddKeys.cs | 53 ++ .../Command/{TranClose.cs => TxnClose.cs} | 14 +- ...rkRollForward.cs => TxnMarkRollForward.cs} | 16 +- .../Command/{TranMonitor.cs => TxnMonitor.cs} | 38 +- .../Command/{TranRoll.cs => TxnRoll.cs} | 89 ++-- AerospikeClient/Command/WriteCommand.cs | 10 +- AerospikeClient/Listener/AbortListener.cs | 4 +- AerospikeClient/Listener/CommitListener.cs | 4 +- AerospikeClient/Main/AbortStatus.cs | 45 ++ AerospikeClient/Main/AerospikeClient.cs | 216 ++++++-- AerospikeClient/Main/AerospikeException.cs | 18 +- AerospikeClient/Main/BatchRecord.cs | 2 +- AerospikeClient/Main/CommitStatus.cs | 47 ++ AerospikeClient/Main/IAerospikeClient.cs | 33 +- AerospikeClient/Main/Key.cs | 11 +- AerospikeClient/Main/ResultCode.cs | 6 +- AerospikeClient/Main/{Tran.cs => Txn.cs} | 104 +++- AerospikeClient/Metrics/MetricsWriter.cs | 4 +- AerospikeClient/Policy/ClientPolicy.cs | 8 +- AerospikeClient/Policy/Policy.cs | 6 +- .../{TranRollPolicy.cs => TxnRollPolicy.cs} | 6 +- ...TranVerifyPolicy.cs => TxnVerifyPolicy.cs} | 6 +- AerospikeClient/Query/QueryExecutor.cs | 2 +- .../Query/QueryListenerExecutor.cs | 2 +- .../Query/QueryPartitionExecutor.cs | 2 +- .../Proxy/AerospikeClientProxy.cs | 28 +- .../Proxy/AsyncClientProxy.cs | 10 +- AerospikeTest/Args.cs | 9 + AerospikeTest/Async/TestAsyncTxn.cs | 492 ++++++++++++++++++ .../Sync/Basic/{TestTran.cs => TestTxn.cs} | 188 +++---- AerospikeTest/settings.json | 7 +- 77 files changed, 3377 insertions(+), 1402 deletions(-) rename AerospikeClient/{Main/AbortError.cs => Async/AsyncOperateRead.cs} (56%) create mode 100644 AerospikeClient/Async/AsyncOperateWrite.cs create mode 100644 AerospikeClient/Async/AsyncReadBase.cs rename AerospikeClient/Async/{AsyncOperate.cs => AsyncTxnAddKeys.cs} (51%) create mode 
100644 AerospikeClient/Async/AsyncTxnClose.cs create mode 100644 AerospikeClient/Async/AsyncTxnMarkRollForward.cs create mode 100644 AerospikeClient/Async/AsyncTxnMonitor.cs create mode 100644 AerospikeClient/Async/AsyncTxnRoll.cs create mode 100644 AerospikeClient/Async/AsyncWriteBase.cs delete mode 100644 AerospikeClient/Command/OperateCommand.cs create mode 100644 AerospikeClient/Command/OperateCommandRead.cs create mode 100644 AerospikeClient/Command/OperateCommandWrite.cs create mode 100644 AerospikeClient/Command/SyncReadCommand.cs delete mode 100644 AerospikeClient/Command/TranAddKeys.cs create mode 100644 AerospikeClient/Command/TxnAddKeys.cs rename AerospikeClient/Command/{TranClose.cs => TxnClose.cs} (80%) rename AerospikeClient/Command/{TranMarkRollForward.cs => TxnMarkRollForward.cs} (70%) rename AerospikeClient/Command/{TranMonitor.cs => TxnMonitor.cs} (79%) rename AerospikeClient/Command/{TranRoll.cs => TxnRoll.cs} (69%) create mode 100644 AerospikeClient/Main/AbortStatus.cs create mode 100644 AerospikeClient/Main/CommitStatus.cs rename AerospikeClient/Main/{Tran.cs => Txn.cs} (63%) rename AerospikeClient/Policy/{TranRollPolicy.cs => TxnRollPolicy.cs} (90%) rename AerospikeClient/Policy/{TranVerifyPolicy.cs => TxnVerifyPolicy.cs} (90%) create mode 100644 AerospikeTest/Async/TestAsyncTxn.cs rename AerospikeTest/Sync/Basic/{TestTran.cs => TestTxn.cs} (74%) diff --git a/AerospikeClient/Admin/Role.cs b/AerospikeClient/Admin/Role.cs index feaa3130..c94616d0 100644 --- a/AerospikeClient/Admin/Role.cs +++ b/AerospikeClient/Admin/Role.cs @@ -49,22 +49,22 @@ public sealed class Role public const string SIndexAdmin = "sindex-admin"; /// - /// Allow read transactions. + /// Allow read commands. /// public const string Read = "read"; /// - /// Allow read and write transactions. + /// Allow read and write commands. /// public const string ReadWrite = "read-write"; /// - /// Allow read and write transactions within user defined functions. 
+ /// Allow read and write commands within user defined functions. /// public const string ReadWriteUdf = "read-write-udf"; /// - /// Allow write transactions. + /// Allow write commands. /// public const string Write = "write"; diff --git a/AerospikeClient/Async/AsyncBatch.cs b/AerospikeClient/Async/AsyncBatch.cs index efa00f13..83586343 100644 --- a/AerospikeClient/Async/AsyncBatch.cs +++ b/AerospikeClient/Async/AsyncBatch.cs @@ -46,8 +46,7 @@ List records { commands[count++] = new AsyncBatchReadListCommand(this, cluster, batchNode, policy, records); } - // Dispatch commands to nodes. - Execute(commands); + this.commands = commands; } protected internal override void OnSuccess() @@ -153,8 +152,7 @@ List records { commands[count++] = new AsyncBatchReadSequenceCommand(this, cluster, batchNode, policy, listener, records); } - // Dispatch commands to nodes. - Execute(commands); + this.commands = commands; } protected internal override void OnSuccess() @@ -273,8 +271,7 @@ bool isOperation { commands[count++] = new AsyncBatchGetArrayCommand(this, cluster, batchNode, policy, keys, binNames, ops, records, readAttr, isOperation); } - // Dispatch commands to nodes. - Execute(commands); + this.commands = commands; } protected internal override void OnSuccess() @@ -396,8 +393,7 @@ bool isOperation { commands[count++] = new AsyncBatchGetSequenceCommand(this, cluster, batchNode, policy, keys, binNames, ops, listener, readAttr, isOperation); } - // Dispatch commands to nodes. - Execute(commands); + this.commands = commands; } protected internal override void OnSuccess() @@ -526,8 +522,7 @@ ExistsArrayListener listener { commands[count++] = new AsyncBatchExistsArrayCommand(this, cluster, batchNode, policy, keys, existsArray); } - // Dispatch commands to nodes. 
- Execute(commands); + this.commands = commands; } protected internal override void OnSuccess() @@ -581,13 +576,7 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - if (opCount > 0) - { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - ParseFieldsRead(keys[batchIndex]); - existsArray[batchIndex] = resultCode == 0; } @@ -634,8 +623,7 @@ ExistsSequenceListener listener { commands[count++] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNode, policy, keys, listener); } - // Dispatch commands to nodes. - Execute(commands); + this.commands = commands; } protected internal override void OnSuccess() @@ -690,11 +678,6 @@ protected internal override void WriteBuffer() protected internal override void ParseRow() { - if (opCount > 0) - { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - Key keyOrig = keys[batchIndex]; ParseFieldsRead(keyOrig); listener.OnExists(keyOrig, resultCode == 0); @@ -745,8 +728,7 @@ List records { tasks[count++] = new AsyncBatchOperateListCommand(this, cluster, batchNode, policy, records); } - // Dispatch commands to nodes. - Execute(tasks); + this.commands = tasks; } protected internal override void OnSuccess() @@ -839,6 +821,11 @@ internal override void SetInDoubt(bool inDoubt) if (record.resultCode == ResultCode.NO_RESPONSE) { record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(record.key); + } } } } @@ -886,8 +873,7 @@ List records { tasks[count++] = new AsyncBatchOperateSequenceCommand(this, cluster, batchNode, policy, listener, records); } - // Dispatch commands to nodes. 
- Execute(tasks); + this.commands = tasks; } protected internal override void OnSuccess() @@ -988,6 +974,11 @@ internal override void SetInDoubt(bool inDoubt) // Set inDoubt, but do not call OnRecord() because user already has access to full // BatchRecord list and can examine each record for inDoubt when the exception occurs. record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(record.key); + } } } } @@ -1044,8 +1035,7 @@ BatchAttr attr { tasks[count++] = new AsyncBatchOperateRecordArrayCommand(this, cluster, batchNode, policy, keys, ops, records, attr); } - // Dispatch commands to nodes. - Execute(tasks); + this.commands = tasks; } protected internal override void OnSuccess() @@ -1132,7 +1122,9 @@ internal override void SetInDoubt(bool inDoubt) if (record.resultCode == ResultCode.NO_RESPONSE) { - record.inDoubt = inDoubt; + record.inDoubt = true; + + policy.Txn?.OnWriteInDoubt(record.key); } } } @@ -1184,8 +1176,7 @@ BatchAttr attr { tasks[count++] = new AsyncBatchOperateRecordSequenceCommand(this, cluster, batchNode, policy, keys, ops, sent, listener, attr); } - // Dispatch commands to nodes. - Execute(tasks); + this.commands = tasks; } public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) @@ -1283,6 +1274,12 @@ internal override void SetInDoubt(bool inDoubt) Key key = keys[index]; BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); sent[index] = true; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(key); + } + AsyncBatch.OnRecord(cluster, listener, record, index); } } @@ -1342,8 +1339,7 @@ BatchAttr attr { tasks[count++] = new AsyncBatchUDFArrayCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, recordArray, attr); } - // Dispatch commands to nodes. 
- Execute(tasks); + this.commands = tasks; } protected internal override void OnSuccess() @@ -1453,7 +1449,9 @@ internal override void SetInDoubt(bool inDoubt) if (record.resultCode == ResultCode.NO_RESPONSE) { - record.inDoubt = inDoubt; + record.inDoubt = true; + + policy.Txn?.OnWriteInDoubt(record.key); } } } @@ -1507,8 +1505,7 @@ BatchAttr attr { tasks[count++] = new AsyncBatchUDFSequenceCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, sent, listener, attr); } - // Dispatch commands to nodes. - Execute(tasks); + this.commands = tasks; } public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) @@ -1629,6 +1626,12 @@ internal override void SetInDoubt(bool inDoubt) Key key = keys[index]; BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); sent[index] = true; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(record.key); + } + AsyncBatch.OnRecord(cluster, listener, record, index); } } @@ -1650,6 +1653,228 @@ internal override List GenerateBatchNodes() } } + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public sealed class AsyncBatchTxnVerifyExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + private readonly BatchRecord[] records; + + public AsyncBatchTxnVerifyExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, + Key[] keys, + long[] versions, + BatchRecord[] records + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, false, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchTxnVerifyCommand(this, cluster, batchNode, policy, policy.Txn, keys, versions, records); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(records, ae); + } + } + + sealed class AsyncBatchTxnVerifyCommand : AsyncBatchCommand + { + private Txn txn; + private Key[] keys; + private long[] versions; + private BatchRecord[] records; + + public AsyncBatchTxnVerifyCommand( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Txn txn, + Key[] keys, + long[] versions, // TODO does this need to be long? + BatchRecord[] records + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.txn = txn; + this.keys = keys; + this.versions = versions; + this.records = records; + } + + public AsyncBatchTxnVerifyCommand(AsyncBatchTxnVerifyCommand other) : base(other) + { + this.txn = other.txn; + this.keys = other.keys; + this.versions = other.versions; + this.records = other.records; + } + + protected internal override void WriteBuffer() + { + SetBatchTxnVerify(batchPolicy, txn, keys, versions, batch); + } + + protected internal override void ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == ResultCode.OK) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, false); + parent.SetRowError(); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchTxnVerifyCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new 
AsyncBatchTxnVerifyCommand(parent, cluster, batchNode, batchPolicy, txn, keys, versions, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + public sealed class AsyncBatchTxnRollExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + private readonly BatchRecord[] records; + + public AsyncBatchTxnRollExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, + Key[] keys, + BatchRecord[] records, + BatchAttr attr + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, false, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchTxnRollCommand(this, cluster, batchNode, policy, keys, records, attr); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(records, ae); + } + } + + sealed class AsyncBatchTxnRollCommand : AsyncBatchCommand + { + private Key[] keys; + private BatchRecord[] records; + private BatchAttr attr; + + public AsyncBatchTxnRollCommand( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + BatchRecord[] records, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.records = records; + this.attr = attr; + } + + public AsyncBatchTxnRollCommand(AsyncBatchTxnRollCommand other) : base(other) + { + this.keys = other.keys; + this.attr = other.attr; + this.records = other.records; + } + + protected internal override void 
WriteBuffer() + { + SetBatchTxnRoll(batchPolicy, keys, batch, attr); + } + + protected internal override void ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == ResultCode.OK) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + parent.SetRowError(); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchTxnRollCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchTxnRollCommand(parent, cluster, batchNode, batchPolicy, keys, records, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, true, parent); + } + } + //------------------------------------------------------- // Batch Base Executor //------------------------------------------------------- @@ -1661,11 +1886,17 @@ public abstract class AsyncBatchExecutor : IBatchStatus private int count; private readonly bool hasResultCode; private bool error; + public AsyncBatchCommand[] commands; public AsyncBatchExecutor(AsyncCluster cluster, bool hasResultCode) { this.hasResultCode = hasResultCode; - cluster.AddTran(); + cluster.AddCommand(); + } + + public void Execute() + { + Execute(commands); } public void Execute(AsyncBatchCommand[] commands) @@ -1810,10 +2041,10 @@ protected override Latency.LatencyType GetLatencyType() protected void ParseFieldsRead(Key key) { - if (policy.Tran != null) + if (policy.Txn != null) { long? version = ParseVersion(fieldCount); - policy.Tran.OnRead(key, version); + policy.Txn.OnRead(key, version); } else { @@ -1823,17 +2054,17 @@ protected void ParseFieldsRead(Key key) protected void ParseFields(Key key, bool hasWrite) { - if (policy.Tran != null) + if (policy.Txn != null) { long? 
version = ParseVersion(fieldCount); if (hasWrite) { - policy.Tran.OnWrite(key, version, resultCode); + policy.Txn.OnWrite(key, version, resultCode); } else { - policy.Tran.OnRead(key, version); + policy.Txn.OnRead(key, version); } } else diff --git a/AerospikeClient/Async/AsyncClient.cs b/AerospikeClient/Async/AsyncClient.cs index af766393..da41e32c 100644 --- a/AerospikeClient/Async/AsyncClient.cs +++ b/AerospikeClient/Async/AsyncClient.cs @@ -136,42 +136,48 @@ public AsyncClient(AsyncClientPolicy policy, params Host[] hosts) /// Asynchronously attempt to commit the given multi-record transaction. First, the expected /// record versions are sent to the server nodes for verification.If all nodes return success, /// the transaction is committed.Otherwise, the transaction is aborted. - ///

+ /// /// This method registers the command with an event loop and returns. /// The event loop thread will process the command and send the results to the listener. - ///

+ /// /// Requires server version 8.0+ - ///

+ /// ///
/// where to send results - /// multi-record transaction - public void Commit(CommitListener listener, Tran tran) + /// multi-record transaction + public void Commit(CommitListener listener, Txn txn) { - tran.SetRollAttempted(); + if (!txn.SetRollAttempted()) + { + listener.OnSuccess(CommitStatus.CommitStatusType.ALREADY_ATTEMPTED); + } - //AsyncTranRoll tm = new AsyncTranRoll( - //cluster, tranVerifyPolicyDefault, tranRollPolicyDefault, tran - //); - //tm.Commit(listener); + AsyncTxnRoll atr = new( + cluster, txnVerifyPolicyDefault, txnRollPolicyDefault, txn + ); + atr.Commit(listener); } /// /// Asynchronously abort and rollback the given multi-record transaction. - ///

- /// This method registers the command with an event loop and returns. - /// The event loop thread will process the command and send the results to the listener. - ///

+ /// + /// This method registers the command with an event loop and returns. + /// The event loop thread will process the command and send the results to the listener. + /// /// Requires server version 8.0+ - ///

+ /// ///
/// where to send results - /// multi-record transaction - public void Abort(AbortListener listener, Tran tran) + /// multi-record transaction + public void Abort(AbortListener listener, Txn txn) { - tran.SetRollAttempted(); + if (!txn.SetRollAttempted()) + { + listener.OnSuccess(AbortStatus.AbortStatusType.ALREADY_ATTEMPTED); + } - //AsyncTranRoll tm = new AsyncTranRoll(cluster, null, tranRollPolicyDefault, tran); - //tm.Abort(listener); + AsyncTxnRoll atr = new(cluster, null, txnRollPolicyDefault, txn); + atr.Abort(listener); } //------------------------------------------------------- @@ -219,7 +225,7 @@ public void Put(WritePolicy policy, WriteListener listener, Key key, params Bin[ policy = writePolicyDefault; } AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.WRITE); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } //------------------------------------------------------- @@ -269,7 +275,7 @@ public void Append(WritePolicy policy, WriteListener listener, Key key, params B policy = writePolicyDefault; } AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.APPEND); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } /// @@ -315,7 +321,7 @@ public void Prepend(WritePolicy policy, WriteListener listener, Key key, params policy = writePolicyDefault; } AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.PREPEND); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } //------------------------------------------------------- @@ -363,7 +369,7 @@ public void Add(WritePolicy policy, WriteListener listener, Key key, params Bin[ policy = writePolicyDefault; } AsyncWrite async = new AsyncWrite(cluster, policy, listener, key, bins, Operation.Type.ADD); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } //------------------------------------------------------- @@ -401,7 +407,7 @@ public void 
Delete(WritePolicy policy, DeleteListener listener, Key key) policy = writePolicyDefault; } AsyncDelete async = new AsyncDelete(cluster, policy, key, listener); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } /// @@ -457,7 +463,8 @@ public void Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Batc BatchAttr attr = new BatchAttr(); attr.SetDelete(deletePolicy); - new AsyncBatchOperateRecordArrayExecutor(cluster, batchPolicy, listener, keys, null, attr); + AsyncBatchOperateRecordArrayExecutor executor = new(cluster, batchPolicy, listener, keys, null, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } /// @@ -497,7 +504,8 @@ public void Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Batc BatchAttr attr = new BatchAttr(); attr.SetDelete(deletePolicy); - new AsyncBatchOperateRecordSequenceExecutor(cluster, batchPolicy, listener, keys, null, attr); + AsyncBatchOperateRecordSequenceExecutor executor = new(cluster, batchPolicy, listener, keys, null, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } //------------------------------------------------------- @@ -537,7 +545,7 @@ public void Touch(WritePolicy policy, WriteListener listener, Key key) policy = writePolicyDefault; } AsyncTouch async = new AsyncTouch(cluster, policy, listener, key); - async.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, async); } //------------------------------------------------------- @@ -574,6 +582,9 @@ public void Exists(Policy policy, ExistsListener listener, Key key) { policy = readPolicyDefault; } + + policy.Txn?.SetNamespace(key.ns); + AsyncExists async = new AsyncExists(cluster, policy, key, listener); async.Execute(); } @@ -613,7 +624,10 @@ public void Exists(BatchPolicy policy, ExistsArrayListener listener, Key[] keys) { policy = batchPolicyDefault; } - new AsyncBatchExistsArrayExecutor(cluster, policy, keys, listener); + policy.Txn?.SetNamespace(keys); + + AsyncBatchExistsArrayExecutor 
executor = new(cluster, policy, keys, listener); + executor.Execute(); } /// @@ -636,7 +650,10 @@ public void Exists(BatchPolicy policy, ExistsSequenceListener listener, Key[] ke { policy = batchPolicyDefault; } - new AsyncBatchExistsSequenceExecutor(cluster, policy, keys, listener); + policy.Txn?.SetNamespace(keys); + + AsyncBatchExistsSequenceExecutor executor = new(cluster, policy, keys, listener); + executor.Execute(); } //------------------------------------------------------- @@ -673,6 +690,9 @@ public void Get(Policy policy, RecordListener listener, Key key) { policy = readPolicyDefault; } + + policy.Txn?.SetNamespace(key.ns); + AsyncRead async = new AsyncRead(cluster, policy, listener, key, (string[])null); async.Execute(); } @@ -709,6 +729,9 @@ public void Get(Policy policy, RecordListener listener, Key key, params string[] { policy = readPolicyDefault; } + + policy.Txn?.SetNamespace(key.ns); + AsyncRead async = new AsyncRead(cluster, policy, listener, key, binNames); async.Execute(); } @@ -743,6 +766,9 @@ public void GetHeader(Policy policy, RecordListener listener, Key key) { policy = readPolicyDefault; } + + policy.Txn?.SetNamespace(key.ns); + AsyncReadHeader async = new AsyncReadHeader(cluster, policy, listener, key); async.Execute(); } @@ -795,7 +821,10 @@ public void Get(BatchPolicy policy, BatchListListener listener, List { policy = batchPolicyDefault; } - new AsyncBatchReadListExecutor(cluster, policy, listener, records); + policy.Txn?.SetNamespace(records); + + AsyncBatchReadListExecutor executor = new(cluster, policy, listener, records); + executor.Execute(); } /// @@ -823,7 +852,10 @@ public void Get(BatchPolicy policy, BatchSequenceListener listener, List @@ -867,7 +899,10 @@ public void Get(BatchPolicy policy, RecordArrayListener listener, Key[] keys) { policy = batchPolicyDefault; } - new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); + 
policy.Txn?.SetNamespace(keys); + + AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); + executor.Execute(); } /// @@ -893,7 +928,10 @@ public void Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys) { policy = batchPolicyDefault; } - new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); + policy.Txn?.SetNamespace(keys); + + AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); + executor.Execute(); } /// @@ -939,7 +977,10 @@ public void Get(BatchPolicy policy, RecordArrayListener listener, Key[] keys, pa { policy = batchPolicyDefault; } - new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false); + policy.Txn?.SetNamespace(keys); + + AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false); + executor.Execute(); } /// @@ -966,7 +1007,10 @@ public void Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys, { policy = batchPolicyDefault; } - new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false); + policy.Txn?.SetNamespace(keys); + + AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false); + executor.Execute(); } /// @@ -1014,7 +1058,11 @@ public void Get(BatchPolicy policy, RecordArrayListener listener, Key[] keys, pa { policy = batchPolicyDefault; } - new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true); + + policy.Txn?.SetNamespace(keys); + + AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true); + executor.Execute(); } /// @@ -1043,7 +1091,11 @@ public 
void Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys, { policy = batchPolicyDefault; } - new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true); + + policy.Txn?.SetNamespace(keys); + + AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, null, ops, Command.INFO1_READ, true); + executor.Execute(); } /// @@ -1087,7 +1139,10 @@ public void GetHeader(BatchPolicy policy, RecordArrayListener listener, Key[] ke { policy = batchPolicyDefault; } - new AsyncBatchGetArrayExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); + policy.Txn?.SetNamespace(keys); + + AsyncBatchGetArrayExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); + executor.Execute(); } /// @@ -1113,7 +1168,10 @@ public void GetHeader(BatchPolicy policy, RecordSequenceListener listener, Key[] { policy = batchPolicyDefault; } - new AsyncBatchGetSequenceExecutor(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); + policy.Txn?.SetNamespace(keys); + + AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); + executor.Execute(); } //------------------------------------------------------- @@ -1162,8 +1220,20 @@ public Task Operate(WritePolicy policy, CancellationToken token, Key key public void Operate(WritePolicy policy, RecordListener listener, Key key, params Operation[] ops) { OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, ops); - AsyncOperate async = new AsyncOperate(cluster, listener, key, args); - async.Execute(); + policy = args.writePolicy; + + if (args.hasWrite) + { + AsyncOperateWrite async = new(cluster, listener, key, args); + AsyncTxnMonitor.Execute(cluster, policy, async); + } + else + { + 
policy.Txn?.SetNamespace(key.ns); + + AsyncOperateRead async = new(cluster, listener, key, args); + async.Execute(); + } } //------------------------------------------------------- @@ -1217,7 +1287,8 @@ public void Operate(BatchPolicy policy, BatchOperateListListener listener, List< { policy = batchParentPolicyWriteDefault; } - new AsyncBatchOperateListExecutor(cluster, policy, listener, records); + AsyncBatchOperateListExecutor executor = new(cluster, policy, listener, records); + AsyncTxnMonitor.ExecuteBatch(policy, executor, records); } /// @@ -1251,7 +1322,8 @@ public void Operate(BatchPolicy policy, BatchRecordSequenceListener listener, Li { policy = batchParentPolicyWriteDefault; } - new AsyncBatchOperateSequenceExecutor(cluster, policy, listener, records); + AsyncBatchOperateSequenceExecutor executor = new(cluster, policy, listener, records); + AsyncTxnMonitor.ExecuteBatch(policy, executor, records); } /// @@ -1315,7 +1387,8 @@ public void Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, Batch } BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops); - new AsyncBatchOperateRecordArrayExecutor(cluster, batchPolicy, listener, keys, ops, attr); + AsyncBatchOperateRecordArrayExecutor executor = new(cluster, batchPolicy, listener, keys, ops, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } /// @@ -1358,7 +1431,8 @@ public void Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, Batch } BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops); - new AsyncBatchOperateRecordSequenceExecutor(cluster, batchPolicy, listener, keys, ops, attr); + AsyncBatchOperateRecordSequenceExecutor executor = new(cluster, batchPolicy, listener, keys, ops, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } //------------------------------------------------------- @@ -1468,7 +1542,7 @@ public void Execute(WritePolicy policy, ExecuteListener listener, Key key, strin policy = writePolicyDefault; } AsyncExecute 
command = new AsyncExecute(cluster, policy, listener, key, packageName, functionName, functionArgs); - command.Execute(); + AsyncTxnMonitor.Execute(cluster, policy, command); } /// @@ -1534,7 +1608,8 @@ public void Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, BatchReco BatchAttr attr = new BatchAttr(); attr.SetUDF(udfPolicy); - new AsyncBatchUDFArrayExecutor(cluster, batchPolicy, listener, keys, packageName, functionName, argBytes, attr); + AsyncBatchUDFArrayExecutor executor = new(cluster, batchPolicy, listener, keys, packageName, functionName, argBytes, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } /// @@ -1581,7 +1656,8 @@ public void Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, BatchReco BatchAttr attr = new BatchAttr(); attr.SetUDF(udfPolicy); - new AsyncBatchUDFSequenceExecutor(cluster, batchPolicy, listener, keys, packageName, functionName, argBytes, attr); + AsyncBatchUDFSequenceExecutor executor = new(cluster, batchPolicy, listener, keys, packageName, functionName, argBytes, attr); + AsyncTxnMonitor.ExecuteBatch(batchPolicy, executor, keys); } //------------------------------------------------------- diff --git a/AerospikeClient/Async/AsyncCommand.cs b/AerospikeClient/Async/AsyncCommand.cs index 4a295901..8a70a0c0 100644 --- a/AerospikeClient/Async/AsyncCommand.cs +++ b/AerospikeClient/Async/AsyncCommand.cs @@ -908,6 +908,12 @@ private void NotifyFailure(AerospikeException ae) ae.Policy = policy; ae.Iteration = iteration; ae.SetInDoubt(IsWrite(), commandSentCounter); + + if (ae.InDoubt) + { + OnInDoubt(); + } + OnFailure(ae); } catch (Exception e) @@ -943,6 +949,12 @@ internal void ReleaseBuffer() } } + // Do nothing by default. Write commands will override this method. 
+ protected internal virtual void OnInDoubt() + { + + } + protected internal virtual bool RetryBatch() { return false; diff --git a/AerospikeClient/Async/AsyncDelete.cs b/AerospikeClient/Async/AsyncDelete.cs index cd63221b..99e79ea1 100644 --- a/AerospikeClient/Async/AsyncDelete.cs +++ b/AerospikeClient/Async/AsyncDelete.cs @@ -17,31 +17,22 @@ namespace Aerospike.Client { - public sealed class AsyncDelete : AsyncSingleCommand + public sealed class AsyncDelete : AsyncWriteBase { - private readonly WritePolicy writePolicy; private readonly DeleteListener listener; - private readonly Key key; - private readonly Partition partition; private bool existed; public AsyncDelete(AsyncCluster cluster, WritePolicy writePolicy, Key key, DeleteListener listener) - : base(cluster, writePolicy) + : base(cluster, writePolicy, key) { - this.writePolicy = writePolicy; this.listener = listener; - this.key = key; - this.partition = Partition.Write(cluster, policy, key); - cluster.AddTran(); + cluster.AddCommand(); } public AsyncDelete(AsyncDelete other) : base(other) { - this.writePolicy = other.writePolicy; this.listener = other.listener; - this.key = other.key; - this.partition = other.partition; } protected internal override AsyncCommand CloneCommand() @@ -49,40 +40,25 @@ protected internal override AsyncCommand CloneCommand() return new AsyncDelete(this); } - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - protected internal override void WriteBuffer() { - SetDelete(writePolicy, key); + SetDelete(writePolicy, Key); } - protected internal override void ParseResult() + protected internal override bool ParseResult() { - int resultCode = dataBuffer[dataOffset + 5]; + ParseHeader(); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { existed = 
true; - return; + return true; } if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { existed = false; - return; + return true; } if (resultCode == ResultCode.FILTERED_OUT) @@ -92,23 +68,17 @@ protected internal override void ParseResult() throw new AerospikeException(resultCode); } existed = true; - return; + return true; } throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } - protected internal override void OnSuccess() { if (listener != null) { - listener.OnSuccess(key, existed); + listener.OnSuccess(Key, existed); } } diff --git a/AerospikeClient/Async/AsyncExecute.cs b/AerospikeClient/Async/AsyncExecute.cs index d3cea1f3..01503c9d 100644 --- a/AerospikeClient/Async/AsyncExecute.cs +++ b/AerospikeClient/Async/AsyncExecute.cs @@ -15,15 +15,17 @@ * the License. */ +using System; + namespace Aerospike.Client { - public sealed class AsyncExecute : AsyncRead + public sealed class AsyncExecute : AsyncWriteBase { - private readonly WritePolicy writePolicy; private readonly ExecuteListener executeListener; private readonly string packageName; private readonly string functionName; private readonly Value[] args; + private Record record; public AsyncExecute ( @@ -36,7 +38,6 @@ public AsyncExecute Value[] args ) : base(cluster, writePolicy, key) { - this.writePolicy = writePolicy; this.executeListener = listener; this.packageName = packageName; this.functionName = functionName; @@ -46,7 +47,6 @@ Value[] args public AsyncExecute(AsyncExecute other) : base(other) { - this.writePolicy = other.writePolicy; this.executeListener = other.executeListener; this.packageName = other.packageName; this.functionName = other.functionName; @@ -58,35 +58,66 @@ protected internal override AsyncCommand CloneCommand() return new AsyncExecute(this); } - protected internal override bool IsWrite() + protected internal override void WriteBuffer() { - return true; + 
SetUdf(writePolicy, Key, packageName, functionName, args); } - protected internal override Node GetNode(Cluster cluster) + protected internal override bool ParseResult() { - return partition.GetNodeWrite(cluster); - } + ParseHeader(); + ParseFields(policy.Txn, Key, true); - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } + if (resultCode == ResultCode.OK) + { + record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false); + return true; + } - protected internal override void WriteBuffer() - { - SetUdf(writePolicy, key, packageName, functionName, args); - } + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false); + HandleUdfError(resultCode); + return true; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return true; + } - protected internal override void HandleNotFound(int resultCode) - { throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) + private void HandleUdfError(int resultCode) { - partition.PrepareRetryWrite(timeout); - return true; + string ret = (string)record.bins["FAILURE"]; + + if (ret == null) + { + throw new AerospikeException(resultCode); + } + + String message; + int code; + + try + { + string[] list = ret.Split(":"); + Int32.TryParse(list[2].Trim(), out code); + message = list[0] + ':' + list[1] + ' ' + list[3]; + } + catch (Exception e) + { + // Use generic exception if parse error occurs. 
+ throw new AerospikeException(resultCode, ret); + } + + throw new AerospikeException(code, message); } protected internal override void OnSuccess() @@ -94,7 +125,7 @@ protected internal override void OnSuccess() if (executeListener != null) { object obj = ParseEndResult(); - executeListener.OnSuccess(key, obj); + executeListener.OnSuccess(Key, obj); } } diff --git a/AerospikeClient/Async/AsyncExists.cs b/AerospikeClient/Async/AsyncExists.cs index 4217acb5..e57daa30 100644 --- a/AerospikeClient/Async/AsyncExists.cs +++ b/AerospikeClient/Async/AsyncExists.cs @@ -17,28 +17,21 @@ namespace Aerospike.Client { - public sealed class AsyncExists : AsyncSingleCommand + public sealed class AsyncExists : AsyncReadBase { private readonly ExistsListener listener; - private readonly Key key; - private readonly Partition partition; private bool exists; public AsyncExists(AsyncCluster cluster, Policy policy, Key key, ExistsListener listener) - : base(cluster, policy) + : base(cluster, policy, key) { this.listener = listener; - this.key = key; - this.partition = Partition.Read(cluster, policy, key); - cluster.AddTran(); } public AsyncExists(AsyncExists other) : base(other) { this.listener = other.listener; - this.key = other.key; - this.partition = other.partition; } protected internal override AsyncCommand CloneCommand() @@ -46,35 +39,25 @@ protected internal override AsyncCommand CloneCommand() return new AsyncExists(this); } - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; - } - protected internal override void WriteBuffer() { SetExists(policy, key); } - protected internal override void ParseResult() + protected internal override bool ParseResult() { - int resultCode = dataBuffer[dataOffset + 5]; + ParseHeader(); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { exists = true; - return; + return true; } if 
(resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { exists = false; - return; + return true; } if (resultCode == ResultCode.FILTERED_OUT) @@ -84,18 +67,12 @@ protected internal override void ParseResult() throw new AerospikeException(resultCode); } exists = true; - return; + return true; } throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - protected internal override void OnSuccess() { if (listener != null) diff --git a/AerospikeClient/Main/AbortError.cs b/AerospikeClient/Async/AsyncOperateRead.cs similarity index 56% rename from AerospikeClient/Main/AbortError.cs rename to AerospikeClient/Async/AsyncOperateRead.cs index 83997c91..076fc4ca 100644 --- a/AerospikeClient/Main/AbortError.cs +++ b/AerospikeClient/Async/AsyncOperateRead.cs @@ -17,25 +17,25 @@ namespace Aerospike.Client { - /// - /// Multi-record transaction (MRT) abort error status. - /// - public static class AbortError + public sealed class AsyncOperateRead : AsyncRead { - public enum AbortErrorType + private readonly OperateArgs args; + + public AsyncOperateRead(AsyncCluster cluster, RecordListener listener, Key key, OperateArgs args) + : base(cluster, args.writePolicy, listener, key, true) + { + this.args = args; + } + + public AsyncOperateRead(AsyncOperateRead other) + : base(other) { - ROLL_BACK_ABANDONED, - CLOSE_ABANDONED + this.args = other.args; } - public static string AbortErrorToString(AbortErrorType error) + protected internal override void WriteBuffer() { - return error switch - { - AbortErrorType.ROLL_BACK_ABANDONED => "MRT client roll back abandoned. Server will eventually abort the MRT.", - AbortErrorType.CLOSE_ABANDONED => "MRT has been rolled back, but MRT client close was abandoned. 
Server will eventually close the MRT.", - _ => "Unexpected AbortErrorType" - }; + SetOperate(args.writePolicy, key, args); } } } diff --git a/AerospikeClient/Async/AsyncOperateWrite.cs b/AerospikeClient/Async/AsyncOperateWrite.cs new file mode 100644 index 00000000..2e2093be --- /dev/null +++ b/AerospikeClient/Async/AsyncOperateWrite.cs @@ -0,0 +1,89 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +using System; + +namespace Aerospike.Client +{ + public sealed class AsyncOperateWrite : AsyncWriteBase + { + private readonly RecordListener listener; + private readonly OperateArgs args; + private Record record; + + public AsyncOperateWrite(AsyncCluster cluster, RecordListener listener, Key key, OperateArgs args) + : base(cluster, args.writePolicy, key) + { + this.args = args; + } + + public AsyncOperateWrite(AsyncOperateWrite other) + : base(other) + { + this.args = other.args; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncOperateWrite(this); + } + + protected internal override void WriteBuffer() + { + SetOperate(args.writePolicy, Key, args); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + if (resultCode == ResultCode.OK) + { + record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, true); + return true; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key, record); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} diff --git a/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs b/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs index 65e4d5d5..747c100c 100644 --- a/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs +++ b/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs @@ -41,7 +41,7 @@ PartitionTracker tracker this.statement = statement; this.tracker = tracker; - cluster.AddTran(); + cluster.AddCommand(); tracker.SleepBetweenRetries = 0; taskId = statement.PrepareTaskId(); QueryPartitions(); diff --git 
a/AerospikeClient/Async/AsyncRead.cs b/AerospikeClient/Async/AsyncRead.cs index 5cb5ebb8..b83b3a22 100644 --- a/AerospikeClient/Async/AsyncRead.cs +++ b/AerospikeClient/Async/AsyncRead.cs @@ -17,59 +17,37 @@ namespace Aerospike.Client { - public class AsyncRead : AsyncSingleCommand + public class AsyncRead : AsyncReadBase { private readonly RecordListener listener; - protected internal readonly Key key; private readonly string[] binNames; private readonly bool isOperation; - protected readonly Partition partition; protected Record record; // Read constructor. public AsyncRead(AsyncCluster cluster, Policy policy, RecordListener listener, Key key, string[] binNames) - : base(cluster, policy) + : base(cluster, policy, key) { this.listener = listener; - this.key = key; this.binNames = binNames; this.isOperation = false; - this.partition = Partition.Read(cluster, policy, key); - cluster.AddTran(); - } - - // UDF constructor. - public AsyncRead(AsyncCluster cluster, WritePolicy policy, Key key) - : base(cluster, policy) - { - this.listener = null; - this.key = key; - this.binNames = null; - this.isOperation = false; - this.partition = Partition.Write(cluster, policy, key); - cluster.AddTran(); } // Operate constructor. 
- public AsyncRead(AsyncCluster cluster, Policy policy, RecordListener listener, Key key, Partition partition, bool isOperation) - : base(cluster, policy) + public AsyncRead(AsyncCluster cluster, Policy policy, RecordListener listener, Key key, bool isOperation) + : base(cluster, policy, key) { this.listener = listener; - this.key = key; this.binNames = null; this.isOperation = isOperation; - this.partition = partition; - cluster.AddTran(); } public AsyncRead(AsyncRead other) : base(other) { this.listener = other.listener; - this.key = other.key; this.binNames = other.binNames; this.isOperation = other.isOperation; - this.partition = other.partition; } protected internal override AsyncCommand CloneCommand() @@ -77,47 +55,25 @@ protected internal override AsyncCommand CloneCommand() return new AsyncRead(this); } - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; - } - protected internal override void WriteBuffer() { SetRead(policy, key, binNames); } - protected internal sealed override void ParseResult() + protected internal sealed override bool ParseResult() { - int resultCode = dataBuffer[dataOffset + 5]; - int generation = ByteUtil.BytesToInt(dataBuffer, dataOffset + 6); - int expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset + 10); - int fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 18); - int opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 20); - dataOffset += Command.MSG_REMAINING_HEADER_SIZE; + ParseHeader(); + ParseFields(policy.Txn, key, false); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { - if (opCount == 0) - { - // Bin data was not returned. 
- record = new Record(null, generation, expiration); - return; - } - SkipKey(fieldCount); record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); - return; + return true; } if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { - HandleNotFound(resultCode); - return; + return true; } if (resultCode == ResultCode.FILTERED_OUT) @@ -126,59 +82,12 @@ record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, ge { throw new AerospikeException(resultCode); } - return; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - SkipKey(fieldCount); - record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); - HandleUdfError(resultCode); - return; + return true; } throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - - protected internal virtual void HandleNotFound(int resultCode) - { - // Do nothing in default case. Record will be null. - } - - private void HandleUdfError(int resultCode) - { - object obj; - - if (!record.bins.TryGetValue("FAILURE", out obj)) - { - throw new AerospikeException(resultCode); - } - - string ret = (string)obj; - string message; - int code; - - try - { - string[] list = ret.Split(':'); - code = Convert.ToInt32(list[2].Trim()); - message = list[0] + ':' + list[1] + ' ' + list[3]; - } - catch (Exception) - { - // Use generic exception if parse error occurs. - throw new AerospikeException(resultCode, ret); - } - - throw new AerospikeException(code, message); - } - protected internal override void OnSuccess() { if (listener != null) diff --git a/AerospikeClient/Async/AsyncReadBase.cs b/AerospikeClient/Async/AsyncReadBase.cs new file mode 100644 index 00000000..2229adba --- /dev/null +++ b/AerospikeClient/Async/AsyncReadBase.cs @@ -0,0 +1,65 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. 
+ * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public abstract class AsyncReadBase : AsyncSingleCommand + { + protected internal readonly Key key; + protected readonly Partition partition; + + public AsyncReadBase(AsyncCluster cluster, Policy policy, Key key) + : base(cluster, policy) + { + this.key = key; + this.partition = Partition.Read(cluster, policy, key); + cluster.AddCommand(); + } + + public AsyncReadBase(AsyncReadBase other) + : base(other) + { + this.key = other.key; + this.partition = other.partition; + } + + protected internal override bool IsWrite() + { + return false; + } + + protected internal override Node GetNode(Cluster cluster) + { + return partition.GetNodeRead(cluster); + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.READ; + } + + protected internal override bool PrepareRetry(bool timeout) + { + partition.PrepareRetryRead(timeout); + return true; + } + + protected internal abstract override void WriteBuffer(); + + protected internal abstract override bool ParseResult(); + } +} diff --git a/AerospikeClient/Async/AsyncReadHeader.cs b/AerospikeClient/Async/AsyncReadHeader.cs index f65f4e35..cc75781a 100644 --- a/AerospikeClient/Async/AsyncReadHeader.cs +++ b/AerospikeClient/Async/AsyncReadHeader.cs @@ -17,28 +17,22 @@ namespace Aerospike.Client { - 
public sealed class AsyncReadHeader : AsyncSingleCommand + public sealed class AsyncReadHeader : AsyncReadBase { private readonly RecordListener listener; - private readonly Key key; - private readonly Partition partition; private Record record; public AsyncReadHeader(AsyncCluster cluster, Policy policy, RecordListener listener, Key key) - : base(cluster, policy) + : base(cluster, policy, key) { this.listener = listener; - this.key = key; - this.partition = Partition.Read(cluster, policy, key); - cluster.AddTran(); + cluster.AddCommand(); } public AsyncReadHeader(AsyncReadHeader other) : base(other) { this.listener = other.listener; - this.key = other.key; - this.partition = other.partition; } protected internal override AsyncCommand CloneCommand() @@ -46,37 +40,25 @@ protected internal override AsyncCommand CloneCommand() return new AsyncReadHeader(this); } - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; - } - protected internal override void WriteBuffer() { SetReadHeader(policy, key); } - protected internal override void ParseResult() + protected internal override bool ParseResult() { - int resultCode = dataBuffer[dataOffset + 5]; + ParseHeader(); + ParseFields(policy.Txn, key, false); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { - int generation = ByteUtil.BytesToInt(dataBuffer, dataOffset + 6); - int expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset + 10); - record = new Record(null, generation, expiration); - return; + return true; } if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { - return; + return true; } if (resultCode == ResultCode.FILTERED_OUT) @@ -85,18 +67,12 @@ protected internal override void ParseResult() { throw new AerospikeException(resultCode); } - return; + return true; } throw new AerospikeException(resultCode); } - protected internal override bool 
PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - protected internal override void OnSuccess() { if (listener != null) diff --git a/AerospikeClient/Async/AsyncScanPartitionExecutor.cs b/AerospikeClient/Async/AsyncScanPartitionExecutor.cs index fd43e5d9..120fd7e2 100644 --- a/AerospikeClient/Async/AsyncScanPartitionExecutor.cs +++ b/AerospikeClient/Async/AsyncScanPartitionExecutor.cs @@ -46,7 +46,7 @@ PartitionTracker tracker this.binNames = binNames; this.tracker = tracker; - cluster.AddTran(); + cluster.AddCommand(); tracker.SleepBetweenRetries = 0; ScanPartitions(); } diff --git a/AerospikeClient/Async/AsyncSingleCommand.cs b/AerospikeClient/Async/AsyncSingleCommand.cs index 6ce6abf2..bad45ec1 100644 --- a/AerospikeClient/Async/AsyncSingleCommand.cs +++ b/AerospikeClient/Async/AsyncSingleCommand.cs @@ -19,6 +19,12 @@ namespace Aerospike.Client { public abstract class AsyncSingleCommand : AsyncCommand { + protected int resultCode; + protected int generation; + protected int expiration; + protected int fieldCount; + protected int opCount; + public AsyncSingleCommand(AsyncCluster cluster, Policy policy) : base(cluster, policy) { @@ -35,6 +41,88 @@ protected internal sealed override void ParseCommand() Finish(); } - protected internal abstract void ParseResult(); + protected void ParseHeader() + { + resultCode = dataBuffer[dataOffset + 5]; + generation = ByteUtil.BytesToInt(dataBuffer, dataOffset + 6); + expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset + 10); + fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 18); + opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 20); + dataOffset += Command.MSG_REMAINING_HEADER_SIZE; + } + + protected void ParseFields(Txn tran, Key key, bool hasWrite) + { + if (tran == null) + { + SkipFields(fieldCount); + return; + } + + long? 
version = null; + + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.RECORD_VERSION) + { + if (size == 7) + { + version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); + } + else + { + throw new AerospikeException("Record version field has invalid size: " + size); + } + } + dataOffset += size; + } + + if (hasWrite) + { + tran.OnWrite(key, version, resultCode); + } + else + { + tran.OnRead(key, version); + } + } + + protected void SkipFields(int fieldCount) + { + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. + for (int i = 0; i < fieldCount; i++) + { + int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4 + fieldlen; + } + } + + protected void ParseTranDeadline(Txn txn) + { + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.MRT_DEADLINE) + { + int deadline = ByteUtil.LittleBytesToInt(dataBuffer, dataOffset); + txn.Deadline = deadline; + } + dataOffset += size; + } + } + + protected internal abstract bool ParseResult(); } } diff --git a/AerospikeClient/Async/AsyncTouch.cs b/AerospikeClient/Async/AsyncTouch.cs index d38f5251..bd998ff3 100644 --- a/AerospikeClient/Async/AsyncTouch.cs +++ b/AerospikeClient/Async/AsyncTouch.cs @@ -17,30 +17,20 @@ namespace Aerospike.Client { - public sealed class AsyncTouch : AsyncSingleCommand + public sealed class AsyncTouch : AsyncWriteBase { - private readonly WritePolicy writePolicy; private readonly WriteListener listener; - private readonly Key key; - private readonly Partition partition; public AsyncTouch(AsyncCluster cluster, WritePolicy writePolicy, WriteListener listener, Key key) - : base(cluster, 
writePolicy) + : base(cluster, writePolicy, key) { - this.writePolicy = writePolicy; this.listener = listener; - this.key = key; - this.partition = Partition.Write(cluster, policy, key); - cluster.AddTran(); } public AsyncTouch(AsyncTouch other) : base(other) { - this.writePolicy = other.writePolicy; this.listener = other.listener; - this.key = other.key; - this.partition = other.partition; } protected internal override AsyncCommand CloneCommand() @@ -48,33 +38,18 @@ protected internal override AsyncCommand CloneCommand() return new AsyncTouch(this); } - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - protected internal override void WriteBuffer() { - SetTouch(writePolicy, key); + SetTouch(writePolicy, Key); } - protected internal override void ParseResult() + protected internal override bool ParseResult() { - int resultCode = dataBuffer[dataOffset + 5]; + ParseHeader(); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { - return; + return true; } if (resultCode == ResultCode.FILTERED_OUT) @@ -83,23 +58,17 @@ protected internal override void ParseResult() { throw new AerospikeException(resultCode); } - return; + return true; } throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } - protected internal override void OnSuccess() { if (listener != null) { - listener.OnSuccess(key); + listener.OnSuccess(Key); } } diff --git a/AerospikeClient/Async/AsyncOperate.cs b/AerospikeClient/Async/AsyncTxnAddKeys.cs similarity index 51% rename from AerospikeClient/Async/AsyncOperate.cs rename to AerospikeClient/Async/AsyncTxnAddKeys.cs index 736c7ec7..66cdfc68 100644 --- a/AerospikeClient/Async/AsyncOperate.cs +++ 
b/AerospikeClient/Async/AsyncTxnAddKeys.cs @@ -17,68 +17,79 @@ namespace Aerospike.Client { - public sealed class AsyncOperate : AsyncRead + public sealed class AsyncTxnAddKeys : AsyncWriteBase { + private readonly RecordListener listener; private readonly OperateArgs args; - public AsyncOperate(AsyncCluster cluster, RecordListener listener, Key key, OperateArgs args) - : base(cluster, args.writePolicy, listener, key, args.GetPartition(cluster, key), true) + public AsyncTxnAddKeys + ( + AsyncCluster cluster, + RecordListener listener, + Key key, + OperateArgs args + ) : base(cluster, args.writePolicy, key) { + this.listener = listener; this.args = args; } - public AsyncOperate(AsyncOperate other) + public AsyncTxnAddKeys(AsyncTxnAddKeys other) : base(other) { + this.listener = other.listener; this.args = other.args; } protected internal override AsyncCommand CloneCommand() { - return new AsyncOperate(this); + return new AsyncTxnAddKeys(this); } - protected internal override bool IsWrite() + protected internal override void WriteBuffer() { - return args.hasWrite; + SetTxnAddKeys(args.writePolicy, Key, args); } - protected internal override Node GetNode(Cluster cluster) + protected internal override bool ParseResult() { - return args.hasWrite ? partition.GetNodeWrite(cluster) : partition.GetNodeRead(cluster); + ParseHeader(); + ParseTranDeadline(policy.Txn); + + if (resultCode == ResultCode.OK) + { + return true; + } + + throw new AerospikeException(resultCode); } - protected override Latency.LatencyType GetLatencyType() + protected internal override bool PrepareRetry(bool timeout) { - return args.hasWrite ? 
Latency.LatencyType.WRITE : Latency.LatencyType.READ; + partition.PrepareRetryWrite(timeout); + return true; } - protected internal override void WriteBuffer() + protected internal override void OnInDoubt() { - SetOperate(args.writePolicy, key, args); + policy.Txn.SetMonitorInDoubt(); } - protected internal override void HandleNotFound(int resultCode) + protected internal override void OnSuccess() { - // Only throw not found exception for command with write operations. - // Read-only command operations return a null record. - if (args.hasWrite) + if (listener != null) { - throw new AerospikeException(resultCode); + listener.OnSuccess(Key, null); } } - protected internal override bool PrepareRetry(bool timeout) + protected internal override void OnFailure(AerospikeException e) { - if (args.hasWrite) - { - partition.PrepareRetryWrite(timeout); - } - else + if (listener != null) { - partition.PrepareRetryRead(timeout); + listener.OnFailure(e); } - return true; } } } + diff --git a/AerospikeClient/Async/AsyncTxnClose.cs b/AerospikeClient/Async/AsyncTxnClose.cs new file mode 100644 index 00000000..e982e9a0 --- /dev/null +++ b/AerospikeClient/Async/AsyncTxnClose.cs @@ -0,0 +1,88 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class AsyncTxnClose : AsyncWriteBase + { + private readonly Txn txn; + private readonly DeleteListener listener; + + public AsyncTxnClose + ( + AsyncCluster cluster, + Txn txn, + DeleteListener listener, + WritePolicy writePolicy, + Key key + ) : base(cluster, writePolicy, key) + { + this.txn = txn; + this.listener = listener; + } + + public AsyncTxnClose(AsyncTxnClose other) + : base(other) + { + this.txn = other.txn; + this.listener = other.listener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncTxnClose(this); + } + + protected internal override void WriteBuffer() + { + SetTxnClose(txn, Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key, true); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs new file mode 100644 index 00000000..6ebd03f4 --- /dev/null +++ b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs @@ -0,0 +1,88 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class AsyncTxnMarkRollForward : AsyncWriteBase + { + private readonly Txn txn; + private readonly WriteListener listener; + + public AsyncTxnMarkRollForward + ( + AsyncCluster cluster, + Txn txn, + WriteListener listener, + WritePolicy writePolicy, + Key key + ) : base(cluster, writePolicy, key) + { + this.txn = txn; + this.listener = listener; + } + + public AsyncTxnMarkRollForward(AsyncTxnMarkRollForward other) + : base(other) + { + this.txn = other.txn; + this.listener = other.listener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncTxnMarkRollForward(this); + } + + protected internal override void WriteBuffer() + { + SetTxnMarkRollForward(txn, Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) + { + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncTxnMonitor.cs b/AerospikeClient/Async/AsyncTxnMonitor.cs new file mode 100644 index 00000000..f7f20c36 --- /dev/null +++ b/AerospikeClient/Async/AsyncTxnMonitor.cs @@ -0,0 +1,207 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. 
+ * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using Aerospike.Client; +using System.Diagnostics.Metrics; +using System.Diagnostics; + +namespace Aerospike.Client +{ + public abstract class AsyncTxnMonitor + { + public static void Execute(AsyncCluster cluster, WritePolicy policy, AsyncWriteBase command) + { + if (policy.Txn == null) + { + // Command is not run under a MRT monitor. Run original command. + command.Execute(); + return; + } + + Txn txn = policy.Txn; + Key cmdKey = command.Key; + + if (txn.Writes.Contains(cmdKey)) + { + // MRT monitor already contains this key. Run original command. + command.Execute(); + return; + } + + // Add key to MRT monitor and then run original command. + Operation[] ops = TxnMonitor.GetTranOps(txn, cmdKey); + SingleTxnMonitor stm = new(cluster, command); + stm.Execute(policy, ops); + } + + public static void ExecuteBatch( + BatchPolicy policy, + AsyncBatchExecutor executor, + Key[] keys + ) + { + if (policy.Txn == null) + { + // Command is not run under a MRT monitor. Run original command. + executor.Execute(executor.commands); + return; + } + + // Add write keys to MRT monitor and then run original command. 
+ Operation[] ops = TxnMonitor.GetTranOps(policy.Txn, keys);
+ BatchTxnMonitor ate = new(executor);
+ ate.Execute(policy, ops);
+ }
+
+ public static void ExecuteBatch(
+ BatchPolicy policy,
+ AsyncBatchExecutor executor,
+ List<BatchRecord> records
+ )
+ {
+ if (policy.Txn == null)
+ {
+ // Command is not run under a MRT monitor. Run original command.
+ executor.Execute();
+ return;
+ }
+
+ // Add write keys to MRT monitor and then run original command.
+ Operation[] ops = TxnMonitor.GetTranOps(policy.Txn, records);
+
+ if (ops == null)
+ {
+ // Readonly batch does not need to add key digests. Run original command.
+ executor.Execute();
+ return;
+ }
+
+ BatchTxnMonitor ate = new(executor);
+ ate.Execute(policy, ops);
+ }
+
+ public sealed class SingleTxnMonitor : AsyncTxnMonitor
+ {
+ public SingleTxnMonitor(AsyncCluster cluster, AsyncWriteBase command)
+ : base(command, cluster)
+ {
+ }
+
+ public override void RunCommand()
+ {
+ command.Execute();
+ }
+
+ public override void OnFailure(AerospikeException ae)
+ {
+ command.OnFailure(ae);
+ }
+ }
+
+ public sealed class BatchTxnMonitor : AsyncTxnMonitor
+ {
+ private readonly AsyncBatchExecutor executor;
+ private readonly AsyncBatchCommand[] commands;
+
+ public BatchTxnMonitor(AsyncBatchExecutor executor)
+ : base(null, null)
+ {
+ this.executor = executor;
+ this.commands = executor.commands;
+ }
+
+ public override void RunCommand()
+ {
+ executor.Execute(commands);
+ }
+
+ public override void OnFailure(AerospikeException ae)
+ {
+ executor.OnFailure(ae);
+ }
+ }
+
+ readonly AsyncCommand command;
+ readonly AsyncCluster cluster;
+
+ private AsyncTxnMonitor(AsyncCommand command, AsyncCluster cluster)
+ {
+ this.command = command;
+ this.cluster = cluster;
+ }
+
+ void Execute(Policy policy, Operation[] ops)
+ {
+ Key tranKey = TxnMonitor.GetTxnMonitorKey(policy.Txn);
+ WritePolicy wp = TxnMonitor.CopyTimeoutPolicy(policy);
+
+ ExecuteRecordListener tranListener = new(this);
+
+ // Add write key(s) to MRT monitor. 
+ OperateArgs args = new(wp, null, null, ops); + AsyncTxnAddKeys tranCommand = new(cluster, tranListener, tranKey, args); + tranCommand.Execute(); + } + + private void NotifyFailure(AerospikeException ae) + { + try + { + OnFailure(ae); + } + catch (Exception t) + { + Log.Error("notifyCommandFailure onFailure() failed: " + t.StackTrace); + } + } + + public abstract void OnFailure(AerospikeException ae); + public abstract void RunCommand(); + + private sealed class ExecuteRecordListener : RecordListener + { + private readonly AsyncTxnMonitor monitor; + + public ExecuteRecordListener(AsyncTxnMonitor monitor) + { + this.monitor = monitor; + } + + public void OnSuccess(Key key, Record record) + { + try + { + // Run original command. + monitor.RunCommand(); + } + catch (AerospikeException ae) + { + monitor.NotifyFailure(ae); + } + catch (Exception t) + { + monitor.NotifyFailure(new AerospikeException(t)); + } + } + + public void OnFailure(AerospikeException ae) + { + monitor.NotifyFailure(new AerospikeException(ResultCode.TRAN_FAILED, "Failed to add key(s) to MRT monitor", ae)); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs new file mode 100644 index 00000000..10bdad1a --- /dev/null +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -0,0 +1,430 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using static Aerospike.Client.AbortStatus; +using static Aerospike.Client.CommitError; +using static Aerospike.Client.CommitStatus; + +namespace Aerospike.Client +{ + public sealed class AsyncTxnRoll + { + private readonly AsyncCluster cluster; + private readonly BatchPolicy verifyPolicy; + private readonly BatchPolicy rollPolicy; + private readonly WritePolicy writePolicy; + private readonly Txn txn; + private readonly Key tranKey; + private CommitListener commitListener; + private AbortListener abortListener; + private BatchRecord[] verifyRecords; + private BatchRecord[] rollRecords; + private AerospikeException verifyException; + + public AsyncTxnRoll + ( + AsyncCluster cluster, + BatchPolicy verifyPolicy, + BatchPolicy rollPolicy, + Txn txn + ) + { + this.cluster = cluster; + this.verifyPolicy = verifyPolicy; + this.rollPolicy = rollPolicy; + this.writePolicy = new WritePolicy(rollPolicy); + this.txn = txn; + this.tranKey = TxnMonitor.GetTxnMonitorKey(txn); + } + + public void Commit(CommitListener listener) + { + commitListener = listener; + Verify(new VerifyListener(this)); + } + + public void Abort(AbortListener listener) + { + abortListener = listener; + + Roll(new RollListener(this), Command.INFO4_MRT_ROLL_BACK); + } + + private void Verify(BatchRecordArrayListener verifyListener) + { + // Validate record versions in a batch. 
+ HashSet<KeyValuePair<Key, long>> reads = txn.Reads.ToHashSet<KeyValuePair<Key, long>>();
+ int max = reads.Count;
+ if (max == 0)
+ {
+ return;
+ }
+
+ BatchRecord[] records = new BatchRecord[max];
+ Key[] keys = new Key[max];
+ long[] versions = new long[max];
+ int count = 0;
+
+ foreach (KeyValuePair<Key, long> entry in reads)
+ {
+ Key key = entry.Key;
+ keys[count] = key;
+ records[count] = new BatchRecord(key, false);
+ versions[count] = entry.Value;
+ count++;
+ }
+ this.verifyRecords = records;
+
+ new AsyncBatchTxnVerifyExecutor(cluster, verifyPolicy, verifyListener, keys, versions, records);
+ }
+
+ private void MarkRollForward()
+ {
+ // Tell MRT monitor that a roll-forward will commence.
+ try
+ {
+ MarkRollForwardListener writeListener = new(this);
+ AsyncTxnMarkRollForward command = new(cluster, txn, writeListener, writePolicy, tranKey);
+ command.Execute();
+ }
+ catch (Exception t)
+ {
+ NotifyCommitFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, t, false);
+ }
+ }
+
+ private void RollForward()
+ {
+ try
+ {
+ RollForwardListener rollListener = new(this);
+ Roll(rollListener, Command.INFO4_MRT_ROLL_FORWARD);
+ }
+ catch (Exception t)
+ {
+ NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED);
+ }
+ }
+
+ private void RollBack()
+ {
+ try
+ {
+ RollForwardListener rollListener = new(this);
+ Roll(rollListener, Command.INFO4_MRT_ROLL_BACK);
+ }
+ catch (Exception t)
+ {
+ NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, t, false);
+ }
+ }
+
+ private void Roll(BatchRecordArrayListener rollListener, int txnAttr)
+ {
+ HashSet<Key> keySet = txn.Writes;
+
+ if (keySet.Count == 0)
+ {
+ return;
+ }
+
+ Key[] keys = keySet.ToArray();
+ BatchRecord[] records = new BatchRecord[keys.Length];
+
+ for (int i = 0; i < keys.Length; i++)
+ {
+ records[i] = new BatchRecord(keys[i], true);
+ }
+
+ this.rollRecords = records;
+
+ // Copy txn roll policy because it needs to be modified. 
+ BatchPolicy batchPolicy = new(rollPolicy); + + BatchAttr attr = new(); + attr.SetTxn(txnAttr); + + new AsyncBatchTxnRollExecutor(cluster, verifyPolicy, rollListener, keys, records, attr); + } + + private void CloseOnCommit(bool verified) + { + if (!txn.MonitorMightExist()) + { + if (verified) + { + NotifyCommitSuccess(CommitStatusType.OK); + } + else + { + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null, false); + } + } + } + + private void CloseOnAbort() + { + if (!txn.MonitorMightExist()) + { + // There is no MRT monitor record to remove. + NotifyAbortSuccess(AbortStatusType.OK); + return; + } + + try + { + CloseOnAbortListener deleteListener = new(this); + AsyncTxnClose command = new(cluster, txn, deleteListener, writePolicy, tranKey); + command.Execute(); + } + catch (Exception t) + { + NotifyAbortSuccess(AbortStatusType.CLOSE_ABANDONED); + } + } + + private void NotifyCommitSuccess(CommitStatusType status) + { + txn.Clear(); + + try + { + commitListener.OnSuccess(status); + } + catch (Exception t) + { + Log.Error("CommitListener onSuccess() failed: " + t.StackTrace); + } + } + + private void NotifyCommitFailure(CommitErrorType error, Exception cause, bool setInDoubt) + { + try + { + AerospikeException.Commit aec = (cause == null) ? 
+ new AerospikeException.Commit(error, verifyRecords, rollRecords) : + new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); + + if (verifyException != null) + { + //aec.AddSuppressed(verifyException); TODO + } + + if (cause is AerospikeException) { + AerospikeException src = (AerospikeException)cause; + aec.Node = src.Node; + aec.Policy = src.Policy; + aec.Iteration = src.Iteration; + + if (setInDoubt) + { + aec.SetInDoubt(src.InDoubt); + } + } + + commitListener.OnFailure(aec); + } + catch (Exception t) + { + Log.Error("CommitListener onFailure() failed: " + t.StackTrace); + } + } + + private void NotifyAbortSuccess(AbortStatusType status) + { + txn.Clear(); + + try + { + abortListener.OnSuccess(status); + } + catch (Exception t) + { + Log.Error("AbortListener onSuccess() failed: " + t.StackTrace); + } + } + + private sealed class VerifyListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public VerifyListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.verifyRecords = records; + + if (status) + { + if (command.txn.MonitorExists()) + { + command.MarkRollForward(); + } + else + { + // There is nothing to roll-forward. 
+ command.CloseOnCommit(true); + } + } + else + { + command.RollBack(); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.verifyRecords = records; + command.verifyException = ae; + command.RollBack(); + } + }; + + private sealed class RollListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public RollListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.rollRecords = records; + + if (status) + { + command.CloseOnAbort(); + } + else + { + command.NotifyAbortSuccess(AbortStatusType.ROLL_BACK_ABANDONED); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.rollRecords = records; + command.NotifyAbortSuccess(AbortStatusType.ROLL_BACK_ABANDONED); + } + }; + + private sealed class MarkRollForwardListener : WriteListener + { + private readonly AsyncTxnRoll command; + + public MarkRollForwardListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(Key key) + { + command.RollForward(); + } + + public void OnFailure(AerospikeException ae) + { + command.NotifyCommitFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, ae, true); + } + }; + + private sealed class RollForwardListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public RollForwardListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.rollRecords = records; + + if (status) + { + command.CloseOnCommit(true); + } + else + { + command.NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.rollRecords = records; + command.NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); + } + }; + + private sealed class RollBackListener : BatchRecordArrayListener + { + 
private readonly AsyncTxnRoll command; + + public RollBackListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.rollRecords = records; + + if (status) + { + command.CloseOnCommit(false); + } + else + { + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, null, false); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.rollRecords = records; + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, ae, false); + } + }; + + private sealed class CloseOnAbortListener : DeleteListener + { + private readonly AsyncTxnRoll command; + + public CloseOnAbortListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(Key key, bool existed) + { + command.NotifyAbortSuccess(AbortStatusType.OK); + } + + public void OnFailure(AerospikeException ae) + { + command.NotifyAbortSuccess(AbortStatusType.CLOSE_ABANDONED); + } + }; + } +} + diff --git a/AerospikeClient/Async/AsyncWrite.cs b/AerospikeClient/Async/AsyncWrite.cs index f95ce6a1..923a5dec 100644 --- a/AerospikeClient/Async/AsyncWrite.cs +++ b/AerospikeClient/Async/AsyncWrite.cs @@ -17,12 +17,9 @@ namespace Aerospike.Client { - public sealed class AsyncWrite : AsyncSingleCommand + public sealed class AsyncWrite : AsyncWriteBase { - private readonly WritePolicy writePolicy; private readonly WriteListener listener; - private readonly Key key; - private readonly Partition partition; private readonly Bin[] bins; private readonly Operation.Type operation; @@ -34,24 +31,18 @@ public AsyncWrite Key key, Bin[] bins, Operation.Type operation - ) : base(cluster, writePolicy) + ) : base(cluster, writePolicy, key) { - this.writePolicy = writePolicy; this.listener = listener; - this.key = key; - this.partition = Partition.Write(cluster, policy, key); this.bins = bins; this.operation = operation; - cluster.AddTran(); + cluster.AddCommand(); } 
public AsyncWrite(AsyncWrite other) : base(other) { - this.writePolicy = other.writePolicy; this.listener = other.listener; - this.key = other.key; - this.partition = other.partition; this.bins = other.bins; this.operation = other.operation; } @@ -61,33 +52,18 @@ protected internal override AsyncCommand CloneCommand() return new AsyncWrite(this); } - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode(Cluster cluster) - { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } - protected internal override void WriteBuffer() { - SetWrite(writePolicy, operation, key, bins); + SetWrite(writePolicy, operation, Key, bins); } - protected internal override void ParseResult() + protected internal override bool ParseResult() { - int resultCode = dataBuffer[dataOffset + 5]; + ParseHeader(); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { - return; + return true; } if (resultCode == ResultCode.FILTERED_OUT) @@ -96,23 +72,17 @@ protected internal override void ParseResult() { throw new AerospikeException(resultCode); } - return; + return true; } throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } - protected internal override void OnSuccess() { if (listener != null) { - listener.OnSuccess(key); + listener.OnSuccess(Key); } } @@ -125,3 +95,4 @@ protected internal override void OnFailure(AerospikeException e) } } } + diff --git a/AerospikeClient/Async/AsyncWriteBase.cs b/AerospikeClient/Async/AsyncWriteBase.cs new file mode 100644 index 00000000..638951f1 --- /dev/null +++ b/AerospikeClient/Async/AsyncWriteBase.cs @@ -0,0 +1,80 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public abstract class AsyncWriteBase : AsyncSingleCommand + { + protected readonly WritePolicy writePolicy; + public Key Key { get; private set; } + protected readonly Partition partition; + + public AsyncWriteBase + ( + AsyncCluster cluster, + WritePolicy writePolicy, + Key key + ) : base(cluster, writePolicy) + { + this.writePolicy = writePolicy; + this.Key = key; + this.partition = Partition.Write(cluster, policy, key); + cluster.AddCommand(); + } + + public AsyncWriteBase(AsyncWriteBase other) + : base(other) + { + this.writePolicy = other.writePolicy; + this.Key = other.Key; + this.partition = other.partition; + } + + protected internal override bool IsWrite() + { + return true; + } + + protected internal override Node GetNode(Cluster cluster) + { + return partition.GetNodeWrite(cluster); + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.WRITE; + } + + protected internal override bool PrepareRetry(bool timeout) + { + partition.PrepareRetryWrite(timeout); + return true; + } + + protected internal override void OnInDoubt() + { + if (writePolicy.Txn != null) + { + writePolicy.Txn.OnWriteInDoubt(Key); + } + } + + protected internal abstract override void WriteBuffer(); + + protected internal abstract override bool ParseResult(); + } +} \ No newline at end of file diff --git a/AerospikeClient/Async/IAsyncClient.cs 
b/AerospikeClient/Async/IAsyncClient.cs index 70d130aa..57172fcd 100644 --- a/AerospikeClient/Async/IAsyncClient.cs +++ b/AerospikeClient/Async/IAsyncClient.cs @@ -45,29 +45,29 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously attempt to commit the given multi-record transaction. First, the expected /// record versions are sent to the server nodes for verification.If all nodes return success, /// the transaction is committed.Otherwise, the transaction is aborted. - ///

+ /// /// This method registers the command with an event loop and returns. /// The event loop thread will process the command and send the results to the listener. - ///

+ /// /// Requires server version 8.0+ - ///

+ /// ///
/// where to send results /// multi-record transaction - void Commit(CommitListener listener, Tran tran); + void Commit(CommitListener listener, Txn txn); /// - /// Asynchronously abort and rollback the given multi-record transaction. - ///

- /// This method registers the command with an event loop and returns. - /// The event loop thread will process the command and send the results to the listener. - ///

- /// Requires server version 8.0+ - ///

+ /// Asynchronously abort and rollback the given multi-record transaction. + /// + /// This method registers the command with an event loop and returns. + /// The event loop thread will process the command and send the results to the listener. + /// + /// Requires server version 8.0+ + /// ///
/// /// - void Abort(AbortListener listener, Tran tran); + void Abort(AbortListener listener, Txn tran); //------------------------------------------------------- // Write Record Operations diff --git a/AerospikeClient/Cluster/Cluster.cs b/AerospikeClient/Cluster/Cluster.cs index 32107bea..9f8b5342 100644 --- a/AerospikeClient/Cluster/Cluster.cs +++ b/AerospikeClient/Cluster/Cluster.cs @@ -14,10 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System; -using System.Collections.Generic; using System.Text; -using System.Threading; namespace Aerospike.Client { @@ -97,7 +94,7 @@ public class Cluster // Login timeout. protected internal readonly int loginTimeout; - // Maximum socket idle to validate connections in transactions. + // Maximum socket idle to validate connections in commands. private readonly double maxSocketIdleMillisTran; // Maximum socket idle to trim peak connections to min connections. @@ -137,7 +134,7 @@ public class Cluster public MetricsPolicy MetricsPolicy; private volatile IMetricsListener metricsListener; private volatile int retryCount; - private volatile int tranCount; + private volatile int commandCount; private volatile int delayQueueTimeoutCount; public Cluster(ClientPolicy policy, Host[] hosts) @@ -1201,26 +1198,26 @@ private static bool SupportsPartitionQuery(Node[] nodes) } /// - /// Increment transaction count when metrics are enabled. + /// Increment command count when metrics are enabled. /// - public void AddTran() + public void AddCommand() { if (MetricsEnabled) { - Interlocked.Increment(ref tranCount); + Interlocked.Increment(ref commandCount); } } /// - /// Return transaction count. The value is cumulative and not reset per metrics interval. + /// Return command count. The value is cumulative and not reset per metrics interval. 
/// - public int GetTranCount() + public int GetCommandCount() { - return tranCount; + return commandCount; } /// - /// Increment transaction retry count. There can be multiple retries for a single transaction. + /// Increment command retry count. There can be multiple retries for a single command. /// public void AddRetry() { @@ -1228,7 +1225,7 @@ public void AddRetry() } /// - /// Add transaction retry count. There can be multiple retries for a single transaction. + /// Add command retry count. There can be multiple retries for a single command. /// public void AddRetries(int count) { @@ -1236,7 +1233,7 @@ public void AddRetries(int count) } /// - /// Return transaction retry count. The value is cumulative and not reset per metrics interval. + /// Return command retry count. The value is cumulative and not reset per metrics interval. /// public int GetRetryCount() { diff --git a/AerospikeClient/Cluster/ClusterStats.cs b/AerospikeClient/Cluster/ClusterStats.cs index 0196fdf3..3aa19513 100644 --- a/AerospikeClient/Cluster/ClusterStats.cs +++ b/AerospikeClient/Cluster/ClusterStats.cs @@ -51,7 +51,7 @@ public sealed class ClusterStats public readonly int invalidNodeCount; /// - /// Count of transaction retires since cluster was started. + /// Count of command retires since cluster was started. /// public readonly long RetryCount; @@ -126,14 +126,14 @@ public sealed class NodeStats public readonly ConnectionStats asyncStats; /// - /// Transaction error count since node was initialized. If the error is retryable, multiple errors per - /// transaction may occur. + /// Command error count since node was initialized. If the error is retryable, multiple errors per + /// Command may occur. /// public readonly long ErrorCount; /// - /// Transaction timeout count since node was initialized. If the timeout is retryable (ie socketTimeout), - /// multiple timeouts per transaction may occur. + /// Command timeout count since node was initialized. 
If the timeout is retryable (ie socketTimeout), + /// multiple timeouts per Command may occur. /// public readonly long TimeoutCount; diff --git a/AerospikeClient/Cluster/Node.cs b/AerospikeClient/Cluster/Node.cs index eaeb440d..f28017de 100644 --- a/AerospikeClient/Cluster/Node.cs +++ b/AerospikeClient/Cluster/Node.cs @@ -976,8 +976,8 @@ public void ValidateErrorCount() } /// - /// Increment transaction error count. If the error is retryable, multiple errors per - /// transaction may occur. + /// Increment command error count. If the error is retryable, multiple errors per + /// command may occur. /// public void AddError() @@ -986,8 +986,8 @@ public void AddError() } /// - /// Increment transaction timeout count. If the timeout is retryable (ie socketTimeout), - /// multiple timeouts per transaction may occur. + /// Increment command timeout count. If the timeout is retryable (ie socketTimeout), + /// multiple timeouts per command may occur. /// public void AddTimeout() { @@ -995,7 +995,7 @@ public void AddTimeout() } /// - /// Return transaction error count. The value is cumulative and not reset per metrics interval. + /// Return command error count. The value is cumulative and not reset per metrics interval. /// public int GetErrorCount() { @@ -1003,7 +1003,7 @@ public int GetErrorCount() } /// - /// Return transaction timeout count. The value is cumulative and not reset per metrics interval. + /// Return command timeout count. The value is cumulative and not reset per metrics interval. 
/// public int GetTimeoutCount() { diff --git a/AerospikeClient/Command/Batch.cs b/AerospikeClient/Command/Batch.cs index 79b4b39c..aec9cdf1 100644 --- a/AerospikeClient/Command/Batch.cs +++ b/AerospikeClient/Command/Batch.cs @@ -186,12 +186,6 @@ protected internal override void WriteBuffer() protected internal override bool ParseRow() { ParseFieldsRead(keys[batchIndex]); - - if (opCount > 0) - { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - existsArray[batchIndex] = resultCode == 0; return true; } @@ -286,6 +280,10 @@ protected internal override void SetInDoubt(bool inDoubt) if (record.resultCode == ResultCode.NO_RESPONSE) { record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.Txn != null) { + policy.Txn.OnWriteInDoubt(record.key); + } } } } @@ -372,6 +370,10 @@ protected internal override void SetInDoubt(bool inDoubt) if (record.resultCode == ResultCode.NO_RESPONSE) { record.inDoubt = inDoubt; + + if (record.inDoubt && policy.Txn != null) { + policy.Txn.OnWriteInDoubt(record.key); + } } } } @@ -479,6 +481,10 @@ protected internal override void SetInDoubt(bool inDoubt) if (record.resultCode == ResultCode.NO_RESPONSE) { record.inDoubt = inDoubt; + + if (record.inDoubt && policy.Txn != null) { + policy.Txn.OnWriteInDoubt(record.key); + } } } } @@ -498,25 +504,25 @@ protected internal override List GenerateBatchNodes() // MRT //------------------------------------------------------- - public sealed class BatchTranVerify : BatchCommand + public sealed class BatchTxnVerify : BatchCommand { - private readonly Tran tran; + private readonly Txn txn; private readonly Key[] keys; private readonly long[] versions; private readonly BatchRecord[] records; - public BatchTranVerify( + public BatchTxnVerify( Cluster cluster, BatchNode batch, BatchPolicy batchPolicy, - Tran tran, + Txn tran, Key[] keys, long[] versions, BatchRecord[] records, BatchStatus status ) : base(cluster, batch, batchPolicy, status, false) { - this.tran 
= tran; + this.txn = tran; this.keys = keys; this.versions = versions; this.records = records; @@ -529,7 +535,7 @@ protected internal override bool IsWrite() protected internal override void WriteBuffer() { - SetBatchTranVerify(batchPolicy, tran, keys, versions, batch); + SetBatchTxnVerify(batchPolicy, txn, keys, versions, batch); } protected internal override bool ParseRow() @@ -552,7 +558,7 @@ protected internal override bool ParseRow() protected internal override BatchCommand CreateCommand(BatchNode batchNode) { - return new BatchTranVerify(cluster, batchNode, batchPolicy, tran, keys, versions, records, status); + return new BatchTxnVerify(cluster, batchNode, batchPolicy, txn, keys, versions, records, status); } protected internal override List GenerateBatchNodes() @@ -561,13 +567,13 @@ protected internal override List GenerateBatchNodes() } } - public sealed class BatchTranRoll : BatchCommand + public sealed class BatchTxnRoll : BatchCommand { private readonly Key[] keys; private readonly BatchRecord[] records; private readonly BatchAttr attr; - public BatchTranRoll( + public BatchTxnRoll( Cluster cluster, BatchNode batch, BatchPolicy batchPolicy, @@ -589,7 +595,7 @@ protected internal override bool IsWrite() protected internal override void WriteBuffer() { - SetBatchTranRoll(batchPolicy, keys, batch, attr); + SetBatchTxnRoll(batchPolicy, keys, batch, attr); } protected internal override bool ParseRow() @@ -630,7 +636,7 @@ protected internal override void SetInDoubt(bool inDoubt) protected internal override BatchCommand CreateCommand(BatchNode batchNode) { - return new BatchTranRoll(cluster, batchNode, batchPolicy, keys, records, attr, status); + return new BatchTxnRoll(cluster, batchNode, batchPolicy, keys, records, attr, status); } protected internal override List GenerateBatchNodes() @@ -701,10 +707,10 @@ public void Run(object obj) protected void ParseFieldsRead(Key key) { - if (policy.Tran != null) + if (policy.Txn != null) { long? 
version = ParseVersion(fieldCount); - policy.Tran.OnRead(key, version); + policy.Txn.OnRead(key, version); } else { @@ -714,17 +720,17 @@ protected void ParseFieldsRead(Key key) protected void ParseFields(BatchRecord br) { - if (policy.Tran != null) + if (policy.Txn != null) { long? version = ParseVersion(fieldCount); if (br.hasWrite) { - policy.Tran.OnWrite(br.key, version, resultCode); + policy.Txn.OnWrite(br.key, version, resultCode); } else { - policy.Tran.OnRead(br.key, version); + policy.Txn.OnRead(br.key, version); } } else diff --git a/AerospikeClient/Command/BatchAttr.cs b/AerospikeClient/Command/BatchAttr.cs index 42839e40..723a6880 100644 --- a/AerospikeClient/Command/BatchAttr.cs +++ b/AerospikeClient/Command/BatchAttr.cs @@ -409,7 +409,7 @@ public void SetOpSize(Operation[] ops) opSize = dataOffset; } - public void SetTran(int attr) + public void SetTxn(int attr) { filterExp = null; readAttr = 0; diff --git a/AerospikeClient/Command/BatchExecutor.cs b/AerospikeClient/Command/BatchExecutor.cs index 7fc82ac4..6da7f1ca 100644 --- a/AerospikeClient/Command/BatchExecutor.cs +++ b/AerospikeClient/Command/BatchExecutor.cs @@ -23,7 +23,7 @@ public sealed class BatchExecutor { public static void Execute(Cluster cluster, BatchPolicy policy, BatchCommand[] commands, BatchStatus status) { - cluster.AddTran(); + cluster.AddCommand(); if (policy.maxConcurrentThreads == 1 || commands.Length <= 1) { diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs index fcba2a1e..9940f7d5 100644 --- a/AerospikeClient/Command/Command.cs +++ b/AerospikeClient/Command/Command.cs @@ -117,7 +117,7 @@ public Command(int socketTimeout, int totalTimeout, int maxRetries) // Multi-record Transactions //-------------------------------------------------- - public void SetTranAddKeys(WritePolicy policy, Key key, OperateArgs args) + public void SetTxnAddKeys(WritePolicy policy, Key key, OperateArgs args) { Begin(); int fieldCount = EstimateKeySize(key); @@ -131,7 
+131,7 @@ public void SetTranAddKeys(WritePolicy policy, Key key, OperateArgs args) End(policy.compress); } - public void SetTranVerify(Tran tran, Key key, long ver) + public void SetTranVerify(Txn tran, Key key, long ver) { Begin(); int fieldCount = EstimateKeySize(key); @@ -160,9 +160,9 @@ public void SetTranVerify(Tran tran, Key key, long ver) End(); } - public void SetBatchTranVerify( + public void SetBatchTxnVerify( BatchPolicy policy, - Tran tran, + Txn tran, Key[] keys, long[] versions, BatchNode batch @@ -174,7 +174,7 @@ BatchNode batch public void SetBatchTranVerify( BatchPolicy policy, - Tran tran, + Txn tran, Key[] keys, long[] versions, BatchOffsets offsets @@ -279,7 +279,7 @@ BatchOffsets offsets End(compress); } - public void SetTranMarkRollForward(Tran tran, Key key) + public void SetTxnMarkRollForward(Txn tran, Key key) { Bin bin = new("fwd", true); @@ -291,7 +291,7 @@ public void SetTranMarkRollForward(Tran tran, Key key) End(); } - public void SetTranRoll(Key key, Tran tran, int tranAttr) + public void SetTranRoll(Key key, Txn tran, int tranAttr) { Begin(); int fieldCount = EstimateKeySize(key); @@ -317,7 +317,7 @@ public void SetTranRoll(Key key, Tran tran, int tranAttr) End(); } - public void SetBatchTranRoll( + public void SetBatchTxnRoll( BatchPolicy policy, Key[] keys, BatchNode batch, @@ -339,8 +339,8 @@ BatchOffsets offsets Begin(); int fieldCount = 1; int max = offsets.Size(); - Tran tran = policy.Tran; - long[] versions = new long[max]; + Txn tran = policy.Txn; + long?[] versions = new long?[max]; for (int i = 0; i < max; i++) { @@ -358,7 +358,7 @@ BatchOffsets offsets { int offset = offsets.Get(i); Key key = keys[offset]; - long ver = versions[i]; + long? ver = versions[i]; dataOffset += key.digest.Length + 4; @@ -395,7 +395,7 @@ BatchOffsets offsets { int offset = offsets.Get(i); Key key = keys[offset]; - long ver = versions[i]; + long? 
ver = versions[i]; ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); dataOffset += 4; @@ -422,7 +422,7 @@ BatchOffsets offsets End(compress); } - public void SetTranClose(Tran tran, Key key) + public void SetTxnClose(Txn tran, Key key) { Begin(); int fieldCount = EstimateKeySize(key); @@ -1121,7 +1121,7 @@ public void SetBatchOperate( { Begin(); int max = offsets.Size(); - Tran tran = policy.Tran; + Txn tran = policy.Txn; long?[] versions = null; if (tran != null) @@ -1280,7 +1280,7 @@ public void SetBatchOperate( { attr.SetUDF(policy); } - WriteBatchWrite(key, policy.Tran, null, attr, attr.filterExp, 3, 0); + WriteBatchWrite(key, policy.Txn, null, attr, attr.filterExp, 3, 0); WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME); WriteField(bu.functionName, FieldType.UDF_FUNCTION); WriteField(bu.argBytes, FieldType.UDF_ARGLIST); @@ -1337,7 +1337,7 @@ BatchOffsets offsets { // Estimate full row size int max = offsets.Size(); - Tran tran = policy.Tran; + Txn tran = policy.Txn; long?[] versions = null; Begin(); @@ -1511,7 +1511,7 @@ BatchOffsets offsets // Estimate buffer size. Begin(); int max = offsets.Size(); - Tran tran = policy.Tran; + Txn tran = policy.Txn; long?[] versions = null; if (tran != null) @@ -1672,7 +1672,7 @@ private static byte GetBatchFlags(BatchPolicy policy) return flags; } - private void SizeTranBatch(Tran tran, long? ver) + private void SizeTranBatch(Txn tran, long? ver) { if (tran != null) { @@ -1711,7 +1711,7 @@ private void WriteBatchHeader(Policy policy, int timeout, int fieldCount) dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); } - private void WriteBatchBinNames(Key key, Tran tran, long? ver, string[] binNames, BatchAttr attr, Expression filter) + private void WriteBatchBinNames(Key key, Txn tran, long? ver, string[] binNames, BatchAttr attr, Expression filter) { WriteBatchRead(key, tran, ver, attr, filter, binNames.Length); @@ -1721,7 +1721,7 @@ private void WriteBatchBinNames(Key key, Tran tran, long? 
ver, string[] binNames } } - private void WriteBatchOperations(Key key, Tran tran, long? ver, Operation[] ops, BatchAttr attr, Expression filter) + private void WriteBatchOperations(Key key, Txn tran, long? ver, Operation[] ops, BatchAttr attr, Expression filter) { if (attr.hasWrite) { @@ -1738,7 +1738,7 @@ private void WriteBatchOperations(Key key, Tran tran, long? ver, Operation[] ops } } - private void WriteBatchRead(Key key, Tran tran, long? ver, BatchAttr attr, Expression filter, int opCount) + private void WriteBatchRead(Key key, Txn tran, long? ver, BatchAttr attr, Expression filter, int opCount) { dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL); dataBuffer[dataOffset++] = (byte)attr.readAttr; @@ -1750,7 +1750,7 @@ private void WriteBatchRead(Key key, Tran tran, long? ver, BatchAttr attr, Expre WriteBatchFields(key, tran, ver, attr,filter, 0, opCount); } - private void WriteBatchWrite(Key key, Tran tran, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) + private void WriteBatchWrite(Key key, Txn tran, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) { dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL); dataBuffer[dataOffset++] = (byte)attr.readAttr; @@ -1764,7 +1764,7 @@ private void WriteBatchWrite(Key key, Tran tran, long? ver, BatchAttr attr, Expr WriteBatchFields(key, tran, ver, attr, filter, fieldCount, opCount); } - private void WriteBatchFields(Key key, Tran tran, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) + private void WriteBatchFields(Key key, Txn tran, long? 
ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) { if (tran != null) { @@ -2356,7 +2356,7 @@ private int EstimateKeySize(Policy policy, Key key, bool sendDeadline) { int fieldCount = EstimateKeySize(key); - fieldCount += SizeTran(key, policy.Tran, sendDeadline); + fieldCount += SizeTran(key, policy.Txn, sendDeadline); if (policy.sendKey) { @@ -2737,7 +2737,7 @@ int operationCount private void WriteKey(Policy policy, Key key, bool sendDeadline) { WriteKey(key); - WriteTran(policy.Tran, sendDeadline); + WriteTran(policy.Txn, sendDeadline); if (policy.sendKey) { @@ -2847,7 +2847,7 @@ private void WriteOperation(Operation.Type operationType) dataBuffer[dataOffset++] = 0; } - private int SizeTran(Key key, Tran tran, bool sendDeadline) + private int SizeTran(Key key, Txn tran, bool sendDeadline) { int fieldCount = 0; @@ -2873,7 +2873,7 @@ private int SizeTran(Key key, Tran tran, bool sendDeadline) return fieldCount; } - private void WriteTran(Tran tran, bool sendDeadline) + private void WriteTran(Txn tran, bool sendDeadline) { if (tran != null) { diff --git a/AerospikeClient/Command/DeleteCommand.cs b/AerospikeClient/Command/DeleteCommand.cs index d2b17713..2d87cf6e 100644 --- a/AerospikeClient/Command/DeleteCommand.cs +++ b/AerospikeClient/Command/DeleteCommand.cs @@ -15,37 +15,18 @@ * the License. 
*/ +using System; + namespace Aerospike.Client { - public sealed class DeleteCommand : SyncCommand + public sealed class DeleteCommand : SyncWriteCommand { - private readonly WritePolicy writePolicy; - private readonly Key key; - private readonly Partition partition; private bool existed; public DeleteCommand(Cluster cluster, WritePolicy writePolicy, Key key) - : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.Write(cluster, writePolicy, key); - cluster.AddTran(); - } - - protected internal override bool IsWrite() - { - return true; - } - - protected internal override Node GetNode() + : base(cluster, writePolicy, key) { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; + cluster.AddCommand(); } protected internal override void WriteBuffer() @@ -55,11 +36,7 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(IConnection conn) { - // Read header. - conn.ReadFully(dataBuffer, MSG_TOTAL_HEADER_SIZE, Command.STATE_READ_HEADER); - conn.UpdateLastUsed(); - - int resultCode = dataBuffer[13]; + ParseHeader(conn); if (resultCode == 0) { @@ -86,12 +63,6 @@ protected internal override void ParseResult(IConnection conn) throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } - public bool Existed() { return existed; diff --git a/AerospikeClient/Command/ExecuteCommand.cs b/AerospikeClient/Command/ExecuteCommand.cs index a92bfe3b..0c7a7bb4 100644 --- a/AerospikeClient/Command/ExecuteCommand.cs +++ b/AerospikeClient/Command/ExecuteCommand.cs @@ -15,14 +15,16 @@ * the License. 
*/ +using Aerospike.Client; + namespace Aerospike.Client { - public sealed class ExecuteCommand : ReadCommand + public sealed class ExecuteCommand : SyncWriteCommand { - private readonly WritePolicy writePolicy; private readonly string packageName; private readonly string functionName; private readonly Value[] args; + public Record Record { get; private set; } public ExecuteCommand ( @@ -32,43 +34,78 @@ public ExecuteCommand string packageName, string functionName, Value[] args - ) : base(cluster, writePolicy, key, Partition.Write(cluster, writePolicy, key), false) + ) : base(cluster, writePolicy, key) { - this.writePolicy = writePolicy; this.packageName = packageName; this.functionName = functionName; this.args = args; } - protected internal override bool IsWrite() + protected internal override void WriteBuffer() { - return true; + SetUdf(writePolicy, key, packageName, functionName, args); } - protected internal override Node GetNode() + protected internal override void ParseResult(IConnection conn) { - return partition.GetNodeWrite(cluster); - } + ParseHeader(conn); + ParseFields(policy.Txn, key, true); - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; - } + if (resultCode == ResultCode.OK) + { + Record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false); + return; + } - protected internal override void WriteBuffer() - { - SetUdf(writePolicy, key, packageName, functionName, args); - } + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, false); + HandleUdfError(resultCode); + return; + } + + if (opCount > 0) + { + throw new AerospikeException("Unexpected UDF opCount on error: " + opCount + ',' + resultCode); + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return; + } - 
protected internal override void HandleNotFound(int resultCode) - { throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) + private void HandleUdfError(int resultCode) { - partition.PrepareRetryWrite(timeout); - return true; + string ret = (string)Record.bins["FAILURE"]; + + if (ret == null) + { + throw new AerospikeException(resultCode); + } + + string message; + int code; + + try + { + string[] list = ret.Split(":"); + Int32.TryParse(list[2].Trim(), out code); + message = list[0] + ':' + list[1] + ' ' + list[3]; + } + catch (Exception e) + { + // Use generic exception if parse error occurs. + throw new AerospikeException(resultCode, ret); + } + + throw new AerospikeException(code, message); } } } diff --git a/AerospikeClient/Command/ExistsCommand.cs b/AerospikeClient/Command/ExistsCommand.cs index 146a4a1e..9269c9b2 100644 --- a/AerospikeClient/Command/ExistsCommand.cs +++ b/AerospikeClient/Command/ExistsCommand.cs @@ -17,28 +17,13 @@ namespace Aerospike.Client { - public sealed class ExistsCommand : SyncCommand + public sealed class ExistsCommand : SyncReadCommand { - private readonly Key key; - private readonly Partition partition; private bool exists; public ExistsCommand(Cluster cluster, Policy policy, Key key) - : base(cluster, policy) + : base(cluster, policy, key) { - this.key = key; - this.partition = Partition.Read(cluster, policy, key); - cluster.AddTran(); - } - - protected internal override Node GetNode() - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; } protected internal override void WriteBuffer() @@ -48,13 +33,10 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(IConnection conn) { - // Read header. 
- conn.ReadFully(dataBuffer, MSG_TOTAL_HEADER_SIZE, Command.STATE_READ_HEADER); - conn.UpdateLastUsed(); - - int resultCode = dataBuffer[13]; + ParseHeader(conn); + ParseFields(policy.Txn, key, false); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { exists = true; return; @@ -79,12 +61,6 @@ protected internal override void ParseResult(IConnection conn) throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - public bool Exists() { return exists; diff --git a/AerospikeClient/Command/MultiCommand.cs b/AerospikeClient/Command/MultiCommand.cs index db235eee..04473c39 100644 --- a/AerospikeClient/Command/MultiCommand.cs +++ b/AerospikeClient/Command/MultiCommand.cs @@ -27,12 +27,7 @@ public abstract class MultiCommand : SyncCommand protected internal readonly String ns; private readonly ulong clusterKey; protected internal int info3; - protected internal int resultCode; - protected internal int generation; - protected internal int expiration; protected internal int batchIndex; - protected internal int fieldCount; - protected internal int opCount; protected internal readonly bool isOperation; private readonly bool first; protected internal volatile bool valid = true; diff --git a/AerospikeClient/Command/OperateCommand.cs b/AerospikeClient/Command/OperateCommand.cs deleted file mode 100644 index 45737711..00000000 --- a/AerospikeClient/Command/OperateCommand.cs +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -namespace Aerospike.Client -{ - public sealed class OperateCommand : ReadCommand - { - private readonly OperateArgs args; - - public OperateCommand(Cluster cluster, Key key, OperateArgs args) - : base(cluster, args.writePolicy, key, args.GetPartition(cluster, key), true) - { - this.args = args; - } - - protected internal override bool IsWrite() - { - return args.hasWrite; - } - - protected internal override Node GetNode() - { - return args.hasWrite ? partition.GetNodeWrite(cluster) : partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return args.hasWrite ? Latency.LatencyType.WRITE : Latency.LatencyType.READ; - } - - protected internal override void WriteBuffer() - { - SetOperate(args.writePolicy, key, args); - } - - protected internal override void HandleNotFound(int resultCode) - { - // Only throw not found exception for command with write operations. - // Read-only command operations return a null record. - if (args.hasWrite) - { - throw new AerospikeException(resultCode); - } - } - - protected internal override bool PrepareRetry(bool timeout) - { - if (args.hasWrite) - { - partition.PrepareRetryWrite(timeout); - } - else - { - partition.PrepareRetryRead(timeout); - } - return true; - } - } -} diff --git a/AerospikeClient/Command/OperateCommandRead.cs b/AerospikeClient/Command/OperateCommandRead.cs new file mode 100644 index 00000000..9e15efb1 --- /dev/null +++ b/AerospikeClient/Command/OperateCommandRead.cs @@ -0,0 +1,35 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. 
+ * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class OperateCommandRead : ReadCommand + { + private readonly OperateArgs args; + + public OperateCommandRead(Cluster cluster, Key key, OperateArgs args) + : base(cluster, args.writePolicy, key, args.GetPartition(cluster, key), true) + { + this.args = args; + } + + protected internal override void WriteBuffer() + { + SetOperate(args.writePolicy, key, args); + } + } +} diff --git a/AerospikeClient/Command/OperateCommandWrite.cs b/AerospikeClient/Command/OperateCommandWrite.cs new file mode 100644 index 00000000..a7144f8d --- /dev/null +++ b/AerospikeClient/Command/OperateCommandWrite.cs @@ -0,0 +1,65 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +using Aerospike.Client; + +namespace Aerospike.Client +{ + public sealed class OperateCommandWrite : SyncWriteCommand + { + private readonly OperateArgs args; + public Record Record { get; private set; } + + public OperateCommandWrite(Cluster cluster, Key key, OperateArgs args) + : base(cluster, args.writePolicy, key) + { + this.args = args; + } + + protected internal override void WriteBuffer() + { + SetOperate(args.writePolicy, key, args); + } + + protected internal override void ParseResult(IConnection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK) { + Record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, true); + return; + } + + if (opCount > 0) { + throw new AerospikeException("Unexpected operate opCount on error: " + opCount + ',' + resultCode); + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return; + } + + throw new AerospikeException(resultCode); + } + + } +} diff --git a/AerospikeClient/Command/ReadCommand.cs b/AerospikeClient/Command/ReadCommand.cs index 79171f0d..a4b00eb8 100644 --- a/AerospikeClient/Command/ReadCommand.cs +++ b/AerospikeClient/Command/ReadCommand.cs @@ -17,52 +17,34 @@ namespace Aerospike.Client { - public class ReadCommand : SyncCommand + public class ReadCommand : SyncReadCommand { - protected readonly Key key; - protected readonly Partition partition; private readonly string[] binNames; private readonly bool isOperation; private Record record; public ReadCommand(Cluster cluster, Policy policy, Key key) - : base(cluster, policy) + : base(cluster, policy, key) { - this.key = key; this.binNames = null; - this.partition = Partition.Read(cluster, policy, key); this.isOperation = false; - cluster.AddTran(); + 
cluster.AddCommand(); } public ReadCommand(Cluster cluster, Policy policy, Key key, String[] binNames) - : base(cluster, policy) + : base(cluster, policy, key) { - this.key = key; this.binNames = binNames; - this.partition = Partition.Read(cluster, policy, key); this.isOperation = false; - cluster.AddTran(); + cluster.AddCommand(); } public ReadCommand(Cluster cluster, Policy policy, Key key, Partition partition, bool isOperation) - : base(cluster, policy) + : base(cluster, policy, key) { - this.key = key; this.binNames = null; - this.partition = partition; this.isOperation = isOperation; - cluster.AddTran(); - } - - protected internal override Node GetNode() - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; + cluster.AddCommand(); } protected internal override void WriteBuffer() @@ -72,68 +54,22 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(IConnection conn) { - // Read header. 
- conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); - - long sz = ByteUtil.BytesToLong(dataBuffer, 0); - int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); - - if (receiveSize <= 0) - { - throw new AerospikeException("Invalid receive size: " + receiveSize); - } - - SizeBuffer(receiveSize); - conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); - conn.UpdateLastUsed(); - - ulong type = (ulong)((sz >> 48) & 0xff); + ParseHeader(conn); + ParseFields(policy.Txn, key, false); - if (type == Command.AS_MSG_TYPE) + if (resultCode == ResultCode.OK) { - dataOffset = 5; + this.record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); + return; } - else if (type == Command.MSG_TYPE_COMPRESSED) - { - int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); - byte[] ubuf = new byte[usize]; - ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); - dataBuffer = ubuf; - dataOffset = 13; - } - else + if (opCount > 0) { - throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); - } - - int resultCode = dataBuffer[dataOffset]; - dataOffset++; - int generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - int expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 8; - int fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - int opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - - if (resultCode == 0) - { - if (opCount == 0) - { - // Bin data was not returned. 
- record = new Record(null, generation, expiration); - return; - } - SkipKey(fieldCount); - record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); - return; + throw new AerospikeException("Unexpected read opCount on error: " + opCount + ',' + resultCode); } if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { - HandleNotFound(resultCode); return; } @@ -146,56 +82,8 @@ record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, ge return; } - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - SkipKey(fieldCount); - record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); - HandleUdfError(resultCode); - return; - } - throw new AerospikeException(resultCode); } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - - protected internal virtual void HandleNotFound(int resultCode) - { - // Do nothing in default case. Record will be null. - } - - private void HandleUdfError(int resultCode) - { - object obj; - - if (!record.bins.TryGetValue("FAILURE", out obj)) - { - throw new AerospikeException(resultCode); - } - - string ret = (string)obj; - string message; - int code; - - try - { - string[] list = ret.Split(':'); - code = Convert.ToInt32(list[2].Trim()); - message = list[0] + ':' + list[1] + ' ' + list[3]; - } - catch (Exception e) - { - // Use generic exception if parse error occurs. 
- throw new AerospikeException(resultCode, ret, e); - } - - throw new AerospikeException(code, message); - } - public Record Record { get diff --git a/AerospikeClient/Command/ReadHeaderCommand.cs b/AerospikeClient/Command/ReadHeaderCommand.cs index 14e41178..cbf443f4 100644 --- a/AerospikeClient/Command/ReadHeaderCommand.cs +++ b/AerospikeClient/Command/ReadHeaderCommand.cs @@ -17,28 +17,13 @@ namespace Aerospike.Client { - public sealed class ReadHeaderCommand : SyncCommand + public sealed class ReadHeaderCommand : SyncReadCommand { - private readonly Key key; - private readonly Partition partition; private Record record; public ReadHeaderCommand(Cluster cluster, Policy policy, Key key) - : base(cluster, policy) + : base(cluster, policy, key) { - this.key = key; - this.partition = Partition.Read(cluster, policy, key); - cluster.AddTran(); - } - - protected internal override Node GetNode() - { - return partition.GetNodeRead(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.READ; } protected internal override void WriteBuffer() @@ -48,16 +33,11 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(IConnection conn) { - // Read header. 
- conn.ReadFully(dataBuffer, MSG_TOTAL_HEADER_SIZE, Command.STATE_READ_HEADER); - conn.UpdateLastUsed(); - - int resultCode = dataBuffer[13]; + ParseHeader(conn); + ParseFields(policy.Txn, key, false); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { - int generation = ByteUtil.BytesToInt(dataBuffer, 14); - int expiration = ByteUtil.BytesToInt(dataBuffer, 18); record = new Record(null, generation, expiration); return; } @@ -79,12 +59,6 @@ protected internal override void ParseResult(IConnection conn) throw new AerospikeException(resultCode); } - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryRead(timeout); - return true; - } - public Record Record { get diff --git a/AerospikeClient/Command/ScanExecutor.cs b/AerospikeClient/Command/ScanExecutor.cs index 8380d849..d9cbe187 100644 --- a/AerospikeClient/Command/ScanExecutor.cs +++ b/AerospikeClient/Command/ScanExecutor.cs @@ -24,7 +24,7 @@ public sealed class ScanExecutor { public static void ScanPartitions(Cluster cluster, ScanPolicy policy, string ns, string setName, string[] binNames, ScanCallback callback, PartitionTracker tracker) { - cluster.AddTran(); + cluster.AddCommand(); while (true) { diff --git a/AerospikeClient/Command/SyncCommand.cs b/AerospikeClient/Command/SyncCommand.cs index 0f74c3d6..7333516a 100644 --- a/AerospikeClient/Command/SyncCommand.cs +++ b/AerospikeClient/Command/SyncCommand.cs @@ -14,7 +14,9 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ +using System; using System.Net.Sockets; +using System.Runtime.InteropServices; using static Aerospike.Client.Latency; namespace Aerospike.Client @@ -26,6 +28,11 @@ public abstract class SyncCommand : Command internal int iteration = 1; internal int commandSentCounter; internal DateTime deadline; + protected int resultCode; + protected int generation; + protected int expiration; + protected int fieldCount; + protected int opCount; /// /// Default constructor. @@ -339,6 +346,17 @@ protected internal void SizeBuffer(int size) } } + protected void SkipFields(int fieldCount) + { + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. + for (int i = 0; i < fieldCount; i++) + { + int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4 + fieldlen; + } + } + protected internal sealed override void End() { // Write total size of message. @@ -346,11 +364,127 @@ protected internal sealed override void End() ByteUtil.LongToBytes(size, dataBuffer, 0); } + protected void ParseHeader(IConnection conn) + { + // Read header. 
+ conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); + + long sz = ByteUtil.BytesToLong(dataBuffer, 0); + int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); + + if (receiveSize <= 0) + { + throw new AerospikeException("Invalid receive size: " + receiveSize); + } + + SizeBuffer(receiveSize); + conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); + conn.UpdateLastUsed(); + + ulong type = (ulong)(sz >> 48) & 0xff; + + if (type == Command.AS_MSG_TYPE) + { + dataOffset = 5; + } + else if (type == Command.MSG_TYPE_COMPRESSED) + { + int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); + byte[] ubuf = new byte[usize]; + + ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); + dataBuffer = ubuf; + dataOffset = 13; + } + else + { + throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); + } + + this.resultCode = dataBuffer[dataOffset] & 0xFF; + dataOffset++; + this.generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + this.expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 8; + this.fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + this.opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + } + + protected void ParseFields(Txn tran, Key key, bool hasWrite) + { + if (tran == null) + { + SkipFields(fieldCount); + return; + } + + long? 
version = null; + + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.RECORD_VERSION) + { + if (size == 7) + { + version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); + } + else + { + throw new AerospikeException("Record version field has invalid size: " + size); + } + } + dataOffset += size; + } + + if (hasWrite) + { + tran.OnWrite(key, version, resultCode); + } + else + { + tran.OnRead(key, version); + } + } + + protected void ParseTranDeadline(Txn txn) + { + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.MRT_DEADLINE) + { + int deadline = ByteUtil.LittleBytesToInt(dataBuffer, dataOffset); + txn.Deadline = deadline; + } + dataOffset += size; + } + } + protected internal sealed override void SetLength(int length) { dataOffset = length; } + // Do nothing by default. Write commands will override this method. + protected internal virtual void OnInDoubt() + { + + } + protected internal virtual bool RetryBatch ( Cluster cluster, diff --git a/AerospikeClient/Command/SyncReadCommand.cs b/AerospikeClient/Command/SyncReadCommand.cs new file mode 100644 index 00000000..dd258924 --- /dev/null +++ b/AerospikeClient/Command/SyncReadCommand.cs @@ -0,0 +1,53 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public abstract class SyncReadCommand : SyncCommand + { + protected readonly Key key; + private readonly Partition partition; + + public SyncReadCommand(Cluster cluster, Policy policy, Key key) + : base(cluster, policy) + { + this.key = key; + this.partition = Partition.Read(cluster, policy, key); + cluster.AddCommand(); + } + + protected internal override Node GetNode() + { + return partition.GetNodeRead(cluster); + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.READ; + } + + protected internal override bool PrepareRetry(bool timeout) + { + partition.PrepareRetryRead(timeout); + return true; + } + + protected internal abstract override void WriteBuffer(); + + protected internal abstract override void ParseResult(IConnection conn); + } +} diff --git a/AerospikeClient/Command/SyncWriteCommand.cs b/AerospikeClient/Command/SyncWriteCommand.cs index 5380f2e3..dd5cb31a 100644 --- a/AerospikeClient/Command/SyncWriteCommand.cs +++ b/AerospikeClient/Command/SyncWriteCommand.cs @@ -15,6 +15,8 @@ * the License. 
*/ +using System.Runtime.InteropServices; + namespace Aerospike.Client { public abstract class SyncWriteCommand : SyncCommand @@ -29,7 +31,7 @@ public SyncWriteCommand(Cluster cluster, WritePolicy writePolicy, Key key) this.writePolicy = writePolicy; this.key = key; this.partition = Partition.Write(cluster, writePolicy, key); - cluster.AddTran(); + cluster.AddCommand(); } protected internal override bool IsWrite() @@ -53,105 +55,11 @@ protected internal override bool PrepareRetry(bool timeout) return true; } - protected int ParseHeader(IConnection conn) - { - // Read header. - conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); - - long sz = ByteUtil.BytesToLong(dataBuffer, 0); - int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); - - if (receiveSize <= 0) - { - throw new AerospikeException("Invalid receive size: " + receiveSize); - } - - SizeBuffer(receiveSize); - conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); - conn.UpdateLastUsed(); - - ulong type = (ulong)(sz >> 48) & 0xff; - - if (type == Command.AS_MSG_TYPE) - { - dataOffset = 5; - } - else if (type == Command.MSG_TYPE_COMPRESSED) - { - int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); - byte[] ubuf = new byte[usize]; - - ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); - dataBuffer = ubuf; - dataOffset = 13; - } - else - { - throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); - } - - int resultCode = dataBuffer[dataOffset] & 0xFF; - dataOffset++; - int generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - int expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 8; - int fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - int opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - - if (policy.Tran == null) - { - SkipFields(fieldCount); - if (opCount > 0) - { - throw new AerospikeException("Unexpected write response opCount: " + 
opCount + ',' + resultCode); - } - return resultCode; - } - - long? version = null; - - for (int i = 0; i < fieldCount; i++) - { - int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - - int fieldType = dataBuffer[dataOffset++]; - int size = len - 1; - - if (fieldType == FieldType.RECORD_VERSION) - { - if (size == 7) - { - version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); - } - else - { - throw new AerospikeException("Record version field has invalid size: " + size); - } - } - dataOffset += size; - } - - policy.Tran.OnWrite(key, version, resultCode); - - if (opCount > 0) - { - throw new AerospikeException("Unexpected write response opCount: " + opCount + ',' + resultCode); - } - return resultCode; - } - - private void SkipFields(int fieldCount) + protected internal override void OnInDoubt() { - // There can be fields in the response (setname etc). - // But for now, ignore them. Expose them to the API if needed in the future. - for (int i = 0; i < fieldCount; i++) + if (writePolicy.Txn != null) { - int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4 + fieldlen; + writePolicy.Txn.OnWriteInDoubt(key); } } diff --git a/AerospikeClient/Command/TouchCommand.cs b/AerospikeClient/Command/TouchCommand.cs index ecad9438..d2206286 100644 --- a/AerospikeClient/Command/TouchCommand.cs +++ b/AerospikeClient/Command/TouchCommand.cs @@ -17,34 +17,12 @@ namespace Aerospike.Client { - public sealed class TouchCommand : SyncCommand + public sealed class TouchCommand : SyncWriteCommand { - private readonly WritePolicy writePolicy; - private readonly Key key; - private readonly Partition partition; - public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key) - : base(cluster, writePolicy) - { - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.Write(cluster, writePolicy, key); - cluster.AddTran(); - } - - protected internal override bool IsWrite() - { - return true; - } - - protected 
internal override Node GetNode() + : base(cluster, writePolicy, key) { - return partition.GetNodeWrite(cluster); - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.WRITE; + cluster.AddCommand(); } protected internal override void WriteBuffer() @@ -54,13 +32,9 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(IConnection conn) { - // Read header. - conn.ReadFully(dataBuffer, MSG_TOTAL_HEADER_SIZE, Command.STATE_READ_HEADER); - conn.UpdateLastUsed(); - - int resultCode = dataBuffer[13]; + ParseHeader(conn); - if (resultCode == 0) + if (resultCode == ResultCode.OK) { return; } @@ -76,11 +50,5 @@ protected internal override void ParseResult(IConnection conn) throw new AerospikeException(resultCode); } - - protected internal override bool PrepareRetry(bool timeout) - { - partition.PrepareRetryWrite(timeout); - return true; - } } } diff --git a/AerospikeClient/Command/TranAddKeys.cs b/AerospikeClient/Command/TranAddKeys.cs deleted file mode 100644 index 39d71567..00000000 --- a/AerospikeClient/Command/TranAddKeys.cs +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public sealed class TranAddKeys : SyncWriteCommand - { - private readonly OperateArgs args; - - public TranAddKeys (Cluster cluster, Key key, OperateArgs args) - : base(cluster, args.writePolicy, key) - { - this.args = args; - } - - protected internal override void WriteBuffer() - { - SetTranAddKeys(args.writePolicy, key, args); - } - - protected internal override void ParseResult(IConnection conn) - { - // Read header. - conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); - - long sz = ByteUtil.BytesToLong(dataBuffer, 0); - int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); - - if (receiveSize <= 0) - { - throw new AerospikeException("Invalid receive size: " + receiveSize); - } - - SizeBuffer(receiveSize); - conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); - conn.UpdateLastUsed(); - - ulong type = (ulong)(sz >> 48) & 0xff; - - if (type == Command.AS_MSG_TYPE) - { - dataOffset = 5; - } - else if (type == Command.MSG_TYPE_COMPRESSED) - { - int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); - byte[] ubuf = new byte[usize]; - - ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); - dataBuffer = ubuf; - dataOffset = 13; - } - else - { - throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); - } - - int resultCode = dataBuffer[dataOffset] & 0xFF; - dataOffset++; - int generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - int expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 8; - int fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - int opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - - for (int i = 0; i < fieldCount; i++) - { - int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - - int fieldType = dataBuffer[dataOffset++]; - int size = len - 1; - - if (fieldType == FieldType.MRT_DEADLINE) - { - int deadline = 
ByteUtil.LittleBytesToInt(dataBuffer, dataOffset); - policy.Tran.Deadline = deadline; - } - dataOffset += size; - } - - if (resultCode == ResultCode.OK) - { - return; - } - - throw new AerospikeException(resultCode); - } - } -} diff --git a/AerospikeClient/Command/TxnAddKeys.cs b/AerospikeClient/Command/TxnAddKeys.cs new file mode 100644 index 00000000..85846e6c --- /dev/null +++ b/AerospikeClient/Command/TxnAddKeys.cs @@ -0,0 +1,53 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class TxnAddKeys : SyncWriteCommand + { + private readonly OperateArgs args; + + public TxnAddKeys (Cluster cluster, Key key, OperateArgs args) + : base(cluster, args.writePolicy, key) + { + this.args = args; + } + + protected internal override void WriteBuffer() + { + SetTxnAddKeys(args.writePolicy, key, args); + } + + protected internal override void ParseResult(IConnection conn) + { + ParseHeader(conn); + ParseTranDeadline(policy.Txn); + + if (resultCode == ResultCode.OK) + { + return; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + policy.Txn.SetMonitorInDoubt(); + } + } +} diff --git a/AerospikeClient/Command/TranClose.cs b/AerospikeClient/Command/TxnClose.cs similarity index 80% rename from AerospikeClient/Command/TranClose.cs rename to AerospikeClient/Command/TxnClose.cs index 4bd1ff49..2c8319b9 100644 --- a/AerospikeClient/Command/TranClose.cs +++ b/AerospikeClient/Command/TxnClose.cs @@ -17,11 +17,11 @@ namespace Aerospike.Client { - public sealed class TranClose : SyncWriteCommand + public sealed class TxnClose : SyncWriteCommand { - private readonly Tran tran; + private readonly Txn tran; - public TranClose(Cluster cluster, Tran tran, WritePolicy writePolicy, Key key) + public TxnClose(Cluster cluster, Txn tran, WritePolicy writePolicy, Key key) : base(cluster, writePolicy, key) { this.tran = tran; @@ -29,12 +29,12 @@ public TranClose(Cluster cluster, Tran tran, WritePolicy writePolicy, Key key) protected internal override void WriteBuffer() { - SetTranClose(tran, key); + SetTxnClose(tran, key); } protected internal override void ParseResult(IConnection conn) { - int resultCode = ParseHeader(conn); + ParseHeader(conn); if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { @@ -43,5 +43,9 @@ protected internal override void ParseResult(IConnection conn) throw new AerospikeException(resultCode); } + + protected 
internal override void OnInDoubt() + { + } } } diff --git a/AerospikeClient/Command/TranMarkRollForward.cs b/AerospikeClient/Command/TxnMarkRollForward.cs similarity index 70% rename from AerospikeClient/Command/TranMarkRollForward.cs rename to AerospikeClient/Command/TxnMarkRollForward.cs index dde97904..838ce457 100644 --- a/AerospikeClient/Command/TranMarkRollForward.cs +++ b/AerospikeClient/Command/TxnMarkRollForward.cs @@ -17,11 +17,11 @@ namespace Aerospike.Client { - public sealed class TranMarkRollForward : SyncWriteCommand + public sealed class TxnMarkRollForward : SyncWriteCommand { - private readonly Tran tran; + private readonly Txn tran; - public TranMarkRollForward(Cluster cluster, Tran tran, WritePolicy writePolicy, Key key) + public TxnMarkRollForward(Cluster cluster, Txn tran, WritePolicy writePolicy, Key key) : base(cluster, writePolicy, key) { this.tran = tran; @@ -29,13 +29,15 @@ public TranMarkRollForward(Cluster cluster, Tran tran, WritePolicy writePolicy, protected internal override void WriteBuffer() { - SetTranMarkRollForward(tran, key); + SetTxnMarkRollForward(tran, key); } protected internal override void ParseResult(IConnection conn) { - int resultCode = ParseHeader(conn); + ParseHeader(conn); + // BIN_EXISTS_ERROR is considered a success because it means a previous attempt already + // succeeded in notifying the server that the MRT will be rolled forward. 
if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) { return; @@ -43,5 +45,9 @@ protected internal override void ParseResult(IConnection conn) throw new AerospikeException(resultCode); } + + protected internal override void OnInDoubt() + { + } } } diff --git a/AerospikeClient/Command/TranMonitor.cs b/AerospikeClient/Command/TxnMonitor.cs similarity index 79% rename from AerospikeClient/Command/TranMonitor.cs rename to AerospikeClient/Command/TxnMonitor.cs index 7fa7584c..c20873f3 100644 --- a/AerospikeClient/Command/TranMonitor.cs +++ b/AerospikeClient/Command/TxnMonitor.cs @@ -17,7 +17,7 @@ namespace Aerospike.Client { - public sealed class TranMonitor + public sealed class TxnMonitor { private static readonly ListPolicy OrderedListPolicy = new(ListOrder.ORDERED, ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL); @@ -27,27 +27,27 @@ public sealed class TranMonitor public static void AddKey(Cluster cluster, WritePolicy policy, Key cmdKey) { - Tran tran = policy.Tran; + Txn txn = policy.Txn; - if (tran.Writes.Contains(cmdKey)) + if (txn.Writes.Contains(cmdKey)) { // Transaction monitor already contains this key. 
return; } - Operation[] ops = GetTranOps(tran, cmdKey); + Operation[] ops = GetTranOps(txn, cmdKey); AddWriteKeys(cluster, policy, ops); } public static void AddKeys(Cluster cluster, BatchPolicy policy, Key[] keys) { - Operation[] ops = GetTranOps(policy.Tran, keys); + Operation[] ops = GetTranOps(policy.Txn, keys); AddWriteKeys(cluster, policy, ops); } public static void AddKeys(Cluster cluster, BatchPolicy policy, List records) { - Operation[] ops = GetTranOps(policy.Tran, records); + Operation[] ops = GetTranOps(policy.Txn, records); if (ops != null) { @@ -55,9 +55,9 @@ public static void AddKeys(Cluster cluster, BatchPolicy policy, List list = new(keys.Length); foreach (Key key in keys) { - tran.Ns = key.ns; + tran.SetNamespace(key.ns); list.Add(Value.Get(key.digest)); } return GetTranOps(tran, list); } - public static Operation[] GetTranOps(Tran tran, List records) + public static Operation[] GetTranOps(Txn tran, List records) { List list = new(records.Count); foreach (BatchRecord br in records) { - tran.Ns = br.key.ns; + tran.SetNamespace(br.key.ns); if (br.hasWrite) { @@ -108,7 +108,7 @@ public static Operation[] GetTranOps(Tran tran, List records) return GetTranOps(tran, list); } - private static Operation[] GetTranOps(Tran tran, List list) + private static Operation[] GetTranOps(Txn tran, List list) { if (tran.Deadline == 0) { @@ -128,14 +128,14 @@ private static Operation[] GetTranOps(Tran tran, List list) private static void AddWriteKeys(Cluster cluster, Policy policy, Operation[] ops) { - Key tranKey = GetTranMonitorKey(policy.Tran); + Key tranKey = GetTxnMonitorKey(policy.Txn); WritePolicy wp = CopyTimeoutPolicy(policy); OperateArgs args = new(wp, null, null, ops); - TranAddKeys cmd = new(cluster, tranKey, args); + TxnAddKeys cmd = new(cluster, tranKey, args); cmd.Execute(); } - public static Key GetTranMonitorKey(Tran tran) + public static Key GetTxnMonitorKey(Txn tran) { return new Key(tran.Ns, " keySet = tran.Writes; + txnKey = 
TxnMonitor.GetTxnMonitorKey(txn); + HashSet keySet = txn.Writes; if (keySet.Count != 0) { // Tell MRT monitor that a roll-forward will commence. try { - MarkRollForward(writePolicy, tranKey); + MarkRollForward(writePolicy, txnKey); } catch (Exception t) { @@ -104,23 +105,25 @@ public void Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) } } - if (tran.Deadline != 0) + if (txn.Deadline != 0) { // Remove MRT monitor. try { - Close(writePolicy, tranKey); + Close(writePolicy, txnKey); } catch (Exception t) { - throw new AerospikeException.Commit(CommitErrorType.CLOSE_ABANDONED, verifyRecords, rollRecords, t); + return CommitStatusType.CLOSE_ABANDONED; } } + + return CommitStatusType.OK; } - public void Abort(BatchPolicy rollPolicy) + public AbortStatusType Abort(BatchPolicy rollPolicy) { - HashSet keySet = tran.Writes; + HashSet keySet = txn.Writes; if (keySet.Count != 0) { @@ -128,30 +131,32 @@ public void Abort(BatchPolicy rollPolicy) { Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); } - catch (Exception t) + catch (Exception) { - throw new AerospikeException.Abort(AbortErrorType.ROLL_BACK_ABANDONED, rollRecords, t); + return AbortStatusType.ROLL_BACK_ABANDONED; } } - if (tran.Deadline != 0) + if (txn.Deadline != 0) { try { WritePolicy writePolicy = new(rollPolicy); - Key tranKey = TranMonitor.GetTranMonitorKey(tran); - Close(writePolicy, tranKey); + Key txnKey = TxnMonitor.GetTxnMonitorKey(txn); + Close(writePolicy, txnKey); } catch (Exception t) { - throw new AerospikeException.Abort(AbortErrorType.CLOSE_ABANDONED, rollRecords, t); + return AbortStatusType.CLOSE_ABANDONED; } } + + return AbortStatusType.OK; } private void Verify(BatchPolicy verifyPolicy) { // Validate record versions in a batch. 
- HashSet> reads = tran.Reads.ToHashSet>(); + HashSet> reads = txn.Reads.ToHashSet>(); int max = reads.Count; if (max == 0) { @@ -181,8 +186,8 @@ private void Verify(BatchPolicy verifyPolicy) foreach (BatchNode bn in bns) { - commands[count++] = new BatchTranVerify( - cluster, bn, verifyPolicy, tran, keys, versions, records, status); + commands[count++] = new BatchTxnVerify( + cluster, bn, verifyPolicy, txn, keys, versions, records, status); } BatchExecutor.Execute(cluster, verifyPolicy, commands, status); @@ -193,16 +198,16 @@ private void Verify(BatchPolicy verifyPolicy) } } - private void MarkRollForward(WritePolicy writePolicy, Key tranKey) + private void MarkRollForward(WritePolicy writePolicy, Key txnKey) { // Tell MRT monitor that a roll-forward will commence. - TranMarkRollForward cmd = new(cluster, tran, writePolicy, tranKey); + TxnMarkRollForward cmd = new(cluster, txn, writePolicy, txnKey); cmd.Execute(); } - private void Roll(BatchPolicy rollPolicy, int tranAttr) + private void Roll(BatchPolicy rollPolicy, int txnAttr) { - HashSet keySet = tran.Writes; + HashSet keySet = txn.Writes; if (keySet.Count == 0) { @@ -219,44 +224,44 @@ private void Roll(BatchPolicy rollPolicy, int tranAttr) this.rollRecords = records; - // Copy tran roll policy because it needs to be modified. + // Copy txn roll policy because it needs to be modified. BatchPolicy batchPolicy = new(rollPolicy); BatchAttr attr = new(); - attr.SetTran(tranAttr); + attr.SetTxn(txnAttr); BatchStatus status = new(true); - // generate() requires a null tran instance. + // generate() requires a null txn instance. List bns = BatchNode.GenerateList(cluster, batchPolicy, keys, records, true, status); BatchCommand[] commands = new BatchCommand[bns.Count]; - // Batch roll forward requires the tran instance. - batchPolicy.Tran = tran; + // Batch roll forward requires the txn instance. 
+ batchPolicy.Txn = txn; int count = 0; foreach (BatchNode bn in bns) { - commands[count++] = new BatchTranRoll( + commands[count++] = new BatchTxnRoll( cluster, bn, batchPolicy, keys, records, attr, status); } BatchExecutor.Execute(cluster, batchPolicy, commands, status); if (!status.GetStatus()) { - string rollString = tranAttr == Command.INFO4_MRT_ROLL_FORWARD ? "commit" : "abort"; + string rollString = txnAttr == Command.INFO4_MRT_ROLL_FORWARD ? "commit" : "abort"; throw new AerospikeException("Failed to " + rollString + " one or more records"); } } - private void Close(WritePolicy writePolicy, Key tranKey) + private void Close(WritePolicy writePolicy, Key txnKey) { // Delete MRT monitor on server. - TranClose cmd = new(cluster, tran, writePolicy, tranKey); + TxnClose cmd = new(cluster, txn, writePolicy, txnKey); cmd.Execute(); // Reset MRT on client. - tran.Clear(); + txn.Clear(); } } } diff --git a/AerospikeClient/Command/WriteCommand.cs b/AerospikeClient/Command/WriteCommand.cs index 05b9eb91..4363427b 100644 --- a/AerospikeClient/Command/WriteCommand.cs +++ b/AerospikeClient/Command/WriteCommand.cs @@ -27,7 +27,7 @@ public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bin { this.bins = bins; this.operation = operation; - cluster.AddTran(); + cluster.AddCommand(); } protected internal override void WriteBuffer() @@ -37,13 +37,9 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(IConnection conn) { - // Read header. 
- conn.ReadFully(dataBuffer, MSG_TOTAL_HEADER_SIZE, Command.STATE_READ_HEADER); - conn.UpdateLastUsed(); + ParseHeader(conn); - int resultCode = dataBuffer[13]; - - if (resultCode == 0) + if (resultCode == ResultCode.OK) { return; } diff --git a/AerospikeClient/Listener/AbortListener.cs b/AerospikeClient/Listener/AbortListener.cs index 3a86682e..4e7f29b5 100644 --- a/AerospikeClient/Listener/AbortListener.cs +++ b/AerospikeClient/Listener/AbortListener.cs @@ -14,6 +14,8 @@ * License for the specific language governing permissions and limitations under * the License. */ +using static Aerospike.Client.AbortStatus; + namespace Aerospike.Client { /// @@ -24,7 +26,7 @@ public interface AbortListener /// /// This method is called when the abort succeeds. /// - void OnSuccess(); + void OnSuccess(AbortStatusType status); /// /// This method is called when the abort fails. diff --git a/AerospikeClient/Listener/CommitListener.cs b/AerospikeClient/Listener/CommitListener.cs index f77bc571..6e32a04d 100644 --- a/AerospikeClient/Listener/CommitListener.cs +++ b/AerospikeClient/Listener/CommitListener.cs @@ -14,6 +14,8 @@ * License for the specific language governing permissions and limitations under * the License. */ +using static Aerospike.Client.CommitStatus; + namespace Aerospike.Client { /// @@ -24,7 +26,7 @@ public interface CommitListener /// /// This method is called when the records are verified and the commit succeeds. /// - void OnSuccess(); + void OnSuccess(CommitStatusType status); /// /// This method is called when the commit fails. diff --git a/AerospikeClient/Main/AbortStatus.cs b/AerospikeClient/Main/AbortStatus.cs new file mode 100644 index 00000000..3bbe81e1 --- /dev/null +++ b/AerospikeClient/Main/AbortStatus.cs @@ -0,0 +1,45 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) abort error status code + /// + public static class AbortStatus + { + public enum AbortStatusType + { + OK, + ALREADY_ATTEMPTED, + ROLL_BACK_ABANDONED, + CLOSE_ABANDONED + } + + public static string AbortErrorToString(AbortStatusType status) + { + return status switch + { + AbortStatusType.OK => "Abort succeeded.", + AbortStatusType.ALREADY_ATTEMPTED => "Abort or commit already attempted.", + AbortStatusType.ROLL_BACK_ABANDONED => "MRT client roll back abandoned. Server will eventually abort the MRT.", + AbortStatusType.CLOSE_ABANDONED => "MRT has been rolled back, but MRT client close was abandoned. Server will eventually close the MRT.", + _ => "Unexpected AbortStatusType." + }; + } + } +} diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index 94d3200d..d656a1bf 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -97,13 +97,13 @@ public class AerospikeClient : IDisposable, IAerospikeClient /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. /// - public BatchPolicy tranVerifyPolicyDefault; + public TxnVerifyPolicy txnVerifyPolicyDefault; /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) /// or back(abort) in a batch. 
/// - public BatchPolicy tranRollPolicyDefault; + public TxnRollPolicy txnRollPolicyDefault; /// /// Default info policy that is used when info command policy is null. @@ -201,8 +201,8 @@ public AerospikeClient(ClientPolicy policy, params Host[] hosts) this.batchWritePolicyDefault = policy.batchWritePolicyDefault; this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; - this.tranVerifyPolicyDefault = policy.tranVerifyPolicyDefault; - this.tranRollPolicyDefault = policy.tranRollPolicyDefault; + this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; + this.txnRollPolicyDefault = policy.txnRollPolicyDefault; this.infoPolicyDefault = policy.infoPolicyDefault; this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); @@ -227,8 +227,8 @@ protected internal AerospikeClient(ClientPolicy policy) this.batchWritePolicyDefault = policy.batchWritePolicyDefault; this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; - this.tranVerifyPolicyDefault = policy.tranVerifyPolicyDefault; - this.tranRollPolicyDefault = policy.tranRollPolicyDefault; + this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; + this.txnRollPolicyDefault = policy.txnRollPolicyDefault; this.infoPolicyDefault = policy.infoPolicyDefault; } else @@ -242,8 +242,8 @@ protected internal AerospikeClient(ClientPolicy policy) this.batchWritePolicyDefault = new BatchWritePolicy(); this.batchDeletePolicyDefault = new BatchDeletePolicy(); this.batchUDFPolicyDefault = new BatchUDFPolicy(); - this.tranVerifyPolicyDefault = new TranVerifyPolicy(); - this.tranRollPolicyDefault= new TranRollPolicy(); + this.txnVerifyPolicyDefault = new TxnVerifyPolicy(); + this.txnRollPolicyDefault= new TxnRollPolicy(); this.infoPolicyDefault = new InfoPolicy(); } this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); @@ -340,20 +340,20 @@ public BatchUDFPolicy 
BatchUDFPolicyDefault /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. /// - public BatchPolicy TranVerifyPolicyDefault + public TxnVerifyPolicy TxnVerifyPolicyDefault { - get { return new BatchPolicy(tranVerifyPolicyDefault); } - set { tranVerifyPolicyDefault = value; } + get { return new TxnVerifyPolicy(txnVerifyPolicyDefault); } + set { txnVerifyPolicyDefault = value; } } /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) /// or back(abort) in a batch. /// - public BatchPolicy TranRollPolicyDefault + public TxnRollPolicy TxnRollPolicyDefault { - get { return new BatchPolicy(tranRollPolicyDefault); } - set { tranRollPolicyDefault = value; } + get { return new TxnRollPolicy(txnRollPolicyDefault); } + set { txnRollPolicyDefault = value; } } /// @@ -464,19 +464,22 @@ public ClusterStats GetClusterStats() /// /// Attempt to commit the given multi-record transaction. First, the expected record versions are - /// sent to the server nodes for verification.If all nodes return success, the transaction is + /// sent to the server nodes for verification. If all nodes return success, the transaction is /// committed.Otherwise, the transaction is aborted. ///

/// Requires server version 8.0+ ///

///
- /// multi-record transaction - public void Commit(Tran tran) + /// multi-record transaction + public CommitStatus.CommitStatusType Commit(Txn txn) { - tran.SetRollAttempted(); + if (!txn.SetRollAttempted()) + { + return CommitStatus.CommitStatusType.ALREADY_ATTEMPTED; + } - TranRoll tm = new TranRoll(cluster, tran); - tm.Commit(tranVerifyPolicyDefault, tranRollPolicyDefault); + TxnRoll tm = new TxnRoll(cluster, txn); + return tm.Commit(txnVerifyPolicyDefault, txnRollPolicyDefault); } /// @@ -485,13 +488,16 @@ public void Commit(Tran tran) /// Requires server version 8.0+ ///

///
- /// multi-record transaction - public void Abort(Tran tran) + /// multi-record transaction + public AbortStatus.AbortStatusType Abort(Txn txn) { - tran.SetRollAttempted(); + if (!txn.SetRollAttempted()) + { + return AbortStatus.AbortStatusType.ALREADY_ATTEMPTED; + } - TranRoll tm = new TranRoll(cluster, tran); - tm.Abort(tranRollPolicyDefault); + TxnRoll tm = new TxnRoll(cluster, txn); + return tm.Abort(txnRollPolicyDefault); } //------------------------------------------------------- @@ -500,7 +506,7 @@ public void Abort(Tran tran) /// /// Write record bin(s). - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeouts, record expiration and how the command is /// handled when the record already exists. /// /// write configuration parameters, pass in null for defaults @@ -513,6 +519,12 @@ public void Put(WritePolicy policy, Key key, params Bin[] bins) { policy = writePolicyDefault; } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.WRITE); command.Execute(); } @@ -523,7 +535,7 @@ public void Put(WritePolicy policy, Key key, params Bin[] bins) /// /// Append bin string values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -537,13 +549,19 @@ public void Append(WritePolicy policy, Key key, params Bin[] bins) { policy = writePolicyDefault; } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.APPEND); command.Execute(); } /// /// Prepend bin string values to existing record bin values. 
- /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -557,6 +575,12 @@ public void Prepend(WritePolicy policy, Key key, params Bin[] bins) { policy = writePolicyDefault; } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.PREPEND); command.Execute(); } @@ -567,7 +591,7 @@ public void Prepend(WritePolicy policy, Key key, params Bin[] bins) /// /// Add integer/double bin values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// write configuration parameters, pass in null for defaults @@ -580,6 +604,12 @@ public void Add(WritePolicy policy, Key key, params Bin[] bins) { policy = writePolicyDefault; } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.ADD); command.Execute(); } @@ -591,7 +621,7 @@ public void Add(WritePolicy policy, Key key, params Bin[] bins) /// /// Delete record for specified key. /// Return whether record existed on server before deletion. - /// The policy specifies the transaction timeout. + /// The policy specifies the command timeout. 
/// /// delete configuration parameters, pass in null for defaults /// unique record identifier @@ -602,6 +632,12 @@ public bool Delete(WritePolicy policy, Key key) { policy = writePolicyDefault; } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + DeleteCommand command = new DeleteCommand(cluster, policy, key); command.Execute(); return command.Existed(); @@ -635,6 +671,11 @@ public BatchResults Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePoli deletePolicy = batchDeletePolicyDefault; } + if (batchPolicy.Txn != null) + { + TxnMonitor.AddKeys(cluster, batchPolicy, keys); + } + BatchAttr attr = new BatchAttr(); attr.SetDelete(deletePolicy); @@ -744,6 +785,12 @@ public void Touch(WritePolicy policy, Key key) { policy = writePolicyDefault; } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + TouchCommand command = new TouchCommand(cluster, policy, key); command.Execute(); } @@ -766,6 +813,12 @@ public bool Exists(Policy policy, Key key) { policy = readPolicyDefault; } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(key.ns); + } + ExistsCommand command = new ExistsCommand(cluster, policy, key); command.Execute(); return command.Exists(); @@ -790,6 +843,10 @@ public bool[] Exists(BatchPolicy policy, Key[] keys) policy = batchPolicyDefault; } + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } bool[] existsArray = new bool[keys.Length]; @@ -842,6 +899,9 @@ public Record Get(Policy policy, Key key) { policy = readPolicyDefault; } + + policy.Txn?.SetNamespace(key.ns); + ReadCommand command = new ReadCommand(cluster, policy, key); command.Execute(); return command.Record; @@ -862,6 +922,12 @@ public Record Get(Policy policy, Key key, params string[] binNames) { policy = readPolicyDefault; } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(key.ns); + } + ReadCommand command = new ReadCommand(cluster, policy, key, binNames); command.Execute(); return command.Record; @@ 
-881,6 +947,12 @@ public Record GetHeader(Policy policy, Key key) { policy = readPolicyDefault; } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(key.ns); + } + ReadHeaderCommand command = new ReadHeaderCommand(cluster, policy, key); command.Execute(); return command.Record; @@ -913,6 +985,11 @@ public bool Get(BatchPolicy policy, List records) policy = batchPolicyDefault; } + if (policy.Txn != null) + { + policy.Txn.SetNamespace(records); + } + BatchStatus status = new BatchStatus(true); List batchNodes = BatchNode.GenerateList(cluster, policy, records, status); BatchCommand[] commands = new BatchCommand[batchNodes.Count]; @@ -946,6 +1023,13 @@ public Record[] Get(BatchPolicy policy, Key[] keys) policy = batchPolicyDefault; } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } + + Record[] records = new Record[keys.Length]; try @@ -1000,6 +1084,11 @@ public Record[] Get(BatchPolicy policy, Key[] keys, params string[] binNames) policy = batchPolicyDefault; } + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } + Record[] records = new Record[keys.Length]; try @@ -1054,6 +1143,11 @@ public Record[] Get(BatchPolicy policy, Key[] keys, params Operation[] ops) policy = batchPolicyDefault; } + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } + Record[] records = new Record[keys.Length]; try @@ -1107,6 +1201,13 @@ public Record[] GetHeader(BatchPolicy policy, Key[] keys) policy = batchPolicyDefault; } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } + + Record[] records = new Record[keys.Length]; try @@ -1210,9 +1311,31 @@ public Record Join(BatchPolicy policy, Key key, params Join[] joins) public Record Operate(WritePolicy policy, Key key, params Operation[] operations) { OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations); - OperateCommand command = new OperateCommand(cluster, key, args); - command.Execute(); - return command.Record; + + if 
(args.hasWrite) + { + policy = args.writePolicy; + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + OperateCommandWrite command = new(cluster, key, args); + command.Execute(); + return command.Record; + } + else + { + if (policy?.Txn != null) + { + policy.Txn.SetNamespace(key.ns); + } + + OperateCommandRead command = new(cluster, key, args); + command.Execute(); + return command.Record; + } } //------------------------------------------------------- @@ -1247,6 +1370,11 @@ public bool Operate(BatchPolicy policy, List records) policy = batchParentPolicyWriteDefault; } + if (policy.Txn != null) + { + TxnMonitor.AddKeys(cluster, policy, records); + } + BatchStatus status = new BatchStatus(true); List batchNodes = BatchNode.GenerateList(cluster, policy, records, status); BatchCommand[] commands = new BatchCommand[batchNodes.Count]; @@ -1293,6 +1421,11 @@ public BatchResults Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolic writePolicy = batchWritePolicyDefault; } + if (batchPolicy.Txn != null) + { + TxnMonitor.AddKeys(cluster, batchPolicy, keys); + } + BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops); BatchRecord[] records = new BatchRecord[keys.Length]; @@ -1567,13 +1700,19 @@ public void RemoveUdf(InfoPolicy policy, string serverPath) /// server package name where user defined function resides /// user defined function /// arguments passed in to user defined function - /// if transaction fails + /// if command fails public object Execute(WritePolicy policy, Key key, string packageName, string functionName, params Value[] args) { if (policy == null) { policy = writePolicyDefault; } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + ExecuteCommand command = new ExecuteCommand(cluster, policy, key, packageName, functionName, args); command.Execute(); @@ -1633,6 +1772,11 @@ public BatchResults Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, K udfPolicy = batchUDFPolicyDefault; 
} + if (batchPolicy.Txn != null) + { + TxnMonitor.AddKeys(cluster, batchPolicy, keys); + } + byte[] argBytes = Packer.Pack(functionArgs); BatchAttr attr = new BatchAttr(); @@ -1695,7 +1839,7 @@ public ExecuteTask Execute(WritePolicy policy, Statement statement, string packa statement.FunctionName = functionName; statement.FunctionArgs = functionArgs; - cluster.AddTran(); + cluster.AddCommand(); ulong taskId = statement.PrepareTaskId(); Node[] nodes = cluster.ValidateNodes(); @@ -1734,7 +1878,7 @@ public ExecuteTask Execute(WritePolicy policy, Statement statement, params Opera statement.Operations = operations; } - cluster.AddTran(); + cluster.AddCommand(); ulong taskId = statement.PrepareTaskId(); Node[] nodes = cluster.ValidateNodes(); diff --git a/AerospikeClient/Main/AerospikeException.cs b/AerospikeClient/Main/AerospikeException.cs index ec7268b0..312fd4ae 100644 --- a/AerospikeClient/Main/AerospikeException.cs +++ b/AerospikeClient/Main/AerospikeException.cs @@ -15,7 +15,7 @@ * the License. */ using System.Text; -using static Aerospike.Client.AbortError; +using static Aerospike.Client.AbortStatus; using static Aerospike.Client.CommitError; namespace Aerospike.Client @@ -167,7 +167,7 @@ public bool ShouldSerializeNode() } /// - /// Transaction policy. + /// Command policy. /// public Policy Policy { @@ -208,7 +208,7 @@ public int Iteration } /// - /// Is it possible that write transaction may have completed. + /// Is it possible that write command may have completed. /// public bool InDoubt { @@ -219,7 +219,7 @@ public bool InDoubt } /// - /// Set whether it is possible that the write transaction may have completed + /// Set whether it is possible that the write command may have completed /// even though this exception was generated. This may be the case when a /// client error occurs (like timeout) after the command was sent to the server. 
/// @@ -604,7 +604,7 @@ public EndOfGRPCStream(int resultCode) } /// - /// Exception thrown when {@link AerospikeClient#commit(com.aerospike.client.Tran)} fails. + /// Exception thrown when {@link AerospikeClient#commit(com.aerospike.client.Txn)} fails. /// public sealed class Commit : AerospikeException { @@ -656,28 +656,28 @@ public override string Message } /// - /// Exception thrown when {@link AerospikeClient#abort(com.aerospike.client.Tran)} fails. + /// Exception thrown when {@link AerospikeClient#abort(com.aerospike.client.Txn)} fails. /// public sealed class Abort : AerospikeException { /// /// Error status of the attempted abort. /// - public readonly AbortErrorType Error; + public readonly AbortStatusType Error; /// /// Roll backward result for each write key in the MRT. May be null if failure occurred before roll backward. /// public readonly BatchRecord[] RollRecords; - public Abort(AbortErrorType error, BatchRecord[] rollRecords) + public Abort(AbortStatusType error, BatchRecord[] rollRecords) : base(ResultCode.TRAN_FAILED, AbortErrorToString(error)) { this.Error = error; this.RollRecords = rollRecords; } - public Abort(AbortErrorType error, BatchRecord[] rollRecords, Exception cause) + public Abort(AbortStatusType error, BatchRecord[] rollRecords, Exception cause) : base(ResultCode.TRAN_FAILED, AbortErrorToString(error), cause) { this.Error = error; diff --git a/AerospikeClient/Main/BatchRecord.cs b/AerospikeClient/Main/BatchRecord.cs index cc59de11..9196404b 100644 --- a/AerospikeClient/Main/BatchRecord.cs +++ b/AerospikeClient/Main/BatchRecord.cs @@ -39,7 +39,7 @@ public class BatchRecord public int resultCode; /// - /// Is it possible that the write transaction may have completed even though an error + /// Is it possible that the write command may have completed even though an error /// occurred for this record. This may be the case when a client error occurs (like timeout) /// after the command was sent to the server. 
/// diff --git a/AerospikeClient/Main/CommitStatus.cs b/AerospikeClient/Main/CommitStatus.cs new file mode 100644 index 00000000..021b88f7 --- /dev/null +++ b/AerospikeClient/Main/CommitStatus.cs @@ -0,0 +1,47 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +using static Aerospike.Client.AbortStatus; + +namespace Aerospike.Client +{ + /// + /// Multi-record transaction (MRT) commit status code. + /// + public static class CommitStatus + { + public enum CommitStatusType + { + OK, + ALREADY_ATTEMPTED, + ROLL_FORWARD_ABANDONED, + CLOSE_ABANDONED + } + + public static string CommitErrorToString(CommitStatusType status) + { + return status switch + { + CommitStatusType.OK => "Commit succeeded.", + CommitStatusType.ALREADY_ATTEMPTED => "Commit or abort already attempted.", + CommitStatusType.ROLL_FORWARD_ABANDONED => "MRT client roll forward abandoned. Server will eventually commit the MRT.", + CommitStatusType.CLOSE_ABANDONED => "MRT has been rolled back, but MRT client close was abandoned. Server will eventually close the MRT.", + _ => "Unexpected AbortStatusType." 
+ }; + } + } +} diff --git a/AerospikeClient/Main/IAerospikeClient.cs b/AerospikeClient/Main/IAerospikeClient.cs index 8e39e632..5adfca00 100644 --- a/AerospikeClient/Main/IAerospikeClient.cs +++ b/AerospikeClient/Main/IAerospikeClient.cs @@ -14,7 +14,6 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System.Diagnostics.Metrics; using System.Reflection; namespace Aerospike.Client @@ -76,13 +75,13 @@ public interface IAerospikeClient /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. /// - BatchPolicy TranVerifyPolicyDefault { get; set; } + TxnVerifyPolicy TxnVerifyPolicyDefault { get; set; } /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) /// or back(abort) in a batch. /// - BatchPolicy TranRollPolicyDefault { get; set; } + TxnRollPolicy TxnRollPolicyDefault { get; set; } /// /// Default info policy that is used when info command policy is null. @@ -135,23 +134,23 @@ public interface IAerospikeClient /// /// Attempt to commit the given multi-record transaction. First, the expected record versions are - /// sent to the server nodes for verification.If all nodes return success, the transaction is - /// committed.Otherwise, the transaction is aborted. + /// sent to the server nodes for verification.If all nodes return success, the command is + /// committed.Otherwise, the command is aborted. ///

/// Requires server version 8.0+ ///

///
- /// multi-record transaction - void Commit(Tran tran); + /// multi-record transaction + CommitStatus.CommitStatusType Commit(Txn txn); /// /// Abort and rollback the given multi-record transaction. - ///

- /// Requires server version 8.0+ + ///

+ /// Requires server version 8.0+ ///

///
- /// multi-record transaction - void Abort(Tran tran); + /// multi-record transaction + AbortStatus.AbortStatusType Abort(Txn txn); //------------------------------------------------------- // Write Record Operations @@ -159,7 +158,7 @@ public interface IAerospikeClient /// /// Write record bin(s). - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// write configuration parameters, pass in null for defaults @@ -174,7 +173,7 @@ public interface IAerospikeClient /// /// Append bin string values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -186,7 +185,7 @@ public interface IAerospikeClient /// /// Prepend bin string values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -202,7 +201,7 @@ public interface IAerospikeClient /// /// Add integer bin values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for integer values. /// @@ -219,7 +218,7 @@ public interface IAerospikeClient /// /// Delete record for specified key. /// Return whether record existed on server before deletion. - /// The policy specifies the transaction timeout. 
+ /// The policy specifies the command timeout. /// /// delete configuration parameters, pass in null for defaults /// unique record identifier @@ -627,7 +626,7 @@ public interface IAerospikeClient /// server package name where user defined function resides /// user defined function /// arguments passed in to user defined function - /// if transaction fails + /// if command fails object Execute(WritePolicy policy, Key key, string packageName, string functionName, params Value[] args); /// diff --git a/AerospikeClient/Main/Key.cs b/AerospikeClient/Main/Key.cs index 16f05bed..f929545b 100644 --- a/AerospikeClient/Main/Key.cs +++ b/AerospikeClient/Main/Key.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2018 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -14,7 +14,6 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System.Linq; namespace Aerospike.Client { @@ -378,11 +377,9 @@ public Key(string ns, byte[] digest, string setName, Value userKey) /// public override int GetHashCode() { - int result = 1; - foreach (byte element in digest) - { - result = 31 * result + element; - } + // The digest is already a hash, so pick 4 bytes from the 20 byte digest at a + // random offset (in this case 8). + int result = ByteUtil.LittleBytesToInt(digest, 8) + 31; return 31 * result + ns.GetHashCode(); } diff --git a/AerospikeClient/Main/ResultCode.cs b/AerospikeClient/Main/ResultCode.cs index 83a0416b..edd6e470 100644 --- a/AerospikeClient/Main/ResultCode.cs +++ b/AerospikeClient/Main/ResultCode.cs @@ -22,7 +22,7 @@ namespace Aerospike.Client public sealed class ResultCode { /// - /// Multi-record transaction failed + /// Multi-record transaction failed. 
/// Value: -17 /// public const int TRAN_FAILED = -17; @@ -271,7 +271,7 @@ public sealed class ResultCode public const int OP_NOT_APPLICABLE = 26; /// - /// The transaction was not performed because the filter was false. + /// The command was not performed because the filter was false. /// Value: 27 /// public const int FILTERED_OUT = 27; @@ -690,7 +690,7 @@ public static string GetResultString(int resultCode) return "Operation not applicable"; case FILTERED_OUT: - return "Transaction filtered out"; + return "Command filtered out"; case LOST_CONFLICT: return "Transaction failed due to conflict with XDR"; diff --git a/AerospikeClient/Main/Tran.cs b/AerospikeClient/Main/Txn.cs similarity index 63% rename from AerospikeClient/Main/Tran.cs rename to AerospikeClient/Main/Txn.cs index 009cae98..03b3f156 100644 --- a/AerospikeClient/Main/Tran.cs +++ b/AerospikeClient/Main/Txn.cs @@ -15,28 +15,30 @@ * the License. */ +using System; using System.Collections.Concurrent; -using System.Collections.Generic; namespace Aerospike.Client { /// /// Mutli-record transaction (MRT). Each command in the MRT must use the same namespace. /// - public class Tran + public class Txn { public long Id { get; private set; } public ConcurrentDictionary Reads { get; private set; } public HashSet Writes { get; private set; } - public string Ns { get; set; } + public string Ns { get; private set; } public int Deadline { get; set; } + private bool monitorInDoubt; + private bool rollAttempted; /// /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with default capacities. /// - public Tran() + public Txn() { Id = CreateId(); Reads = new ConcurrentDictionary(); @@ -48,7 +50,7 @@ public Tran() ///
/// expected number of record reads in the MRT. Minimum value is 16. /// expected number of record writes in the MRT. Minimum value is 16. - public Tran(int readsCapacity, int writesCapacity) + public Txn(int readsCapacity, int writesCapacity) { if (readsCapacity < 16) { @@ -86,8 +88,6 @@ private static long CreateId() /// internal void OnRead(Key key, long? version) { - Ns = key.ns; - if (version.HasValue) { Reads.TryAdd(key, version.Value); @@ -99,9 +99,16 @@ internal void OnRead(Key key, long? version) ///
/// /// - public long GetReadVersion(Key key) + public long? GetReadVersion(Key key) { - return Reads[key]; + if (Reads.ContainsKey(key)) + { + return Reads[key]; + } + else + { + return null; + } } /// @@ -128,14 +135,87 @@ public void OnWrite(Key key, long? version, int resultCode) } } - public void SetRollAttempted() + /// + /// Add key to write hash when write command is in doubt (usually caused by timeout). + /// + public void OnWriteInDoubt(Key key) + { + Reads.Remove(key, out _); + Writes.Add(key); + } + + /// + /// Set MRT namespace only if doesn't already exist. + /// If namespace already exists, verify new namespace is the same. + /// + public void SetNamespace(string ns) + { + if (Ns == null) + { + Ns = ns; + } + else if (!Ns.Equals(ns)) { + throw new AerospikeException("Namespace must be the same for all commands in the MRT. orig: " + + Ns + " new: " + ns); + } + } + + /// + /// Set MRT namespaces for each key only if doesn't already exist. + /// If namespace already exists, verify new namespace is the same. + /// + public void SetNamespace(Key[] keys) + { + foreach (Key key in keys) + { + SetNamespace(key.ns); + } + } + + /// + /// Set MRT namespaces for each key only if doesn't already exist. + /// If namespace already exists, verify new namespace is the same. + /// + public void SetNamespace(List records) + { + foreach (BatchRead br in records) + { + SetNamespace(br.key.ns); + } + } + + /// + /// Set that the MRT monitor existence is in doubt. + /// + public void SetMonitorInDoubt() + { + this.monitorInDoubt = true; + } + + /// + /// Does MRT monitor record exist or is in doubt. + /// + public bool MonitorMightExist() + { + return Deadline != 0 || monitorInDoubt; + } + + /// + /// Does MRT monitor record exist. 
+ /// + public bool MonitorExists() + { + return Deadline != 0; + } + + public bool SetRollAttempted() { if (rollAttempted) { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, - "commit() or abort() may only be called once for a given MRT"); + return false; } rollAttempted = true; + return true; } public void Clear() diff --git a/AerospikeClient/Metrics/MetricsWriter.cs b/AerospikeClient/Metrics/MetricsWriter.cs index e3b54d20..7d94989a 100644 --- a/AerospikeClient/Metrics/MetricsWriter.cs +++ b/AerospikeClient/Metrics/MetricsWriter.cs @@ -148,7 +148,7 @@ private void Open() sb.Append(now.ToString(timestampFormat)); sb.Append(" header(1)"); - sb.Append(" cluster[name,cpu,mem,recoverQueueSize,invalidNodeCount,tranCount,retryCount,delayQueueTimeoutCount,asyncThreadsInUse,asyncCompletionPortsInUse,node[]]"); + sb.Append(" cluster[name,cpu,mem,recoverQueueSize,invalidNodeCount,commandCount,retryCount,delayQueueTimeoutCount,asyncThreadsInUse,asyncCompletionPortsInUse,node[]]"); sb.Append(" node[name,address,port,syncConn,asyncConn,errors,timeouts,latency[]]"); sb.Append(" conn[inUse,inPool,opened,closed]"); sb.Append(" latency("); @@ -180,7 +180,7 @@ private void WriteCluster(Cluster cluster) sb.Append(','); sb.Append(cluster.InvalidNodeCount); // Cumulative. Not reset on each interval. sb.Append(','); - sb.Append(cluster.GetTranCount()); // Cumulative. Not reset on each interval. + sb.Append(cluster.GetCommandCount()); // Cumulative. Not reset on each interval. sb.Append(','); sb.Append(cluster.GetRetryCount()); // Cumulative. Not reset on each interval. sb.Append(','); diff --git a/AerospikeClient/Policy/ClientPolicy.cs b/AerospikeClient/Policy/ClientPolicy.cs index be3dca01..2e9c5edd 100644 --- a/AerospikeClient/Policy/ClientPolicy.cs +++ b/AerospikeClient/Policy/ClientPolicy.cs @@ -229,13 +229,13 @@ public class ClientPolicy /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. 
/// - public TranVerifyPolicy tranVerifyPolicyDefault = new TranVerifyPolicy(); + public TxnVerifyPolicy txnVerifyPolicyDefault = new TxnVerifyPolicy(); /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) /// or back(abort) in a batch. /// - public TranRollPolicy tranRollPolicyDefault = new TranRollPolicy(); + public TxnRollPolicy txnRollPolicyDefault = new TxnRollPolicy(); /// /// Default info policy that is used when info command's policy is null. @@ -337,8 +337,8 @@ public ClientPolicy(ClientPolicy other) this.batchWritePolicyDefault = new BatchWritePolicy(other.batchWritePolicyDefault); this.batchDeletePolicyDefault = new BatchDeletePolicy(other.batchDeletePolicyDefault); this.batchUDFPolicyDefault = new BatchUDFPolicy(other.batchUDFPolicyDefault); - this.tranVerifyPolicyDefault = new TranVerifyPolicy(other.tranVerifyPolicyDefault); - this.tranRollPolicyDefault = new TranRollPolicy(other.tranRollPolicyDefault); + this.txnVerifyPolicyDefault = new TxnVerifyPolicy(other.txnVerifyPolicyDefault); + this.txnRollPolicyDefault = new TxnRollPolicy(other.txnRollPolicyDefault); this.infoPolicyDefault = new InfoPolicy(other.infoPolicyDefault); this.tlsPolicy = (other.tlsPolicy != null) ? new TlsPolicy(other.tlsPolicy) : null; this.ipMap = other.ipMap; diff --git a/AerospikeClient/Policy/Policy.cs b/AerospikeClient/Policy/Policy.cs index aa431247..cd1962e9 100644 --- a/AerospikeClient/Policy/Policy.cs +++ b/AerospikeClient/Policy/Policy.cs @@ -31,7 +31,7 @@ public class Policy /// Default: null /// /// - public Tran Tran { get; set; } + public Txn Txn { get; set; } /// /// Read policy for AP (availability) namespaces. 
@@ -283,7 +283,7 @@ public class Policy /// public Policy(Policy other) { - this.Tran = other.Tran; + this.Txn = other.Txn; this.readModeAP = other.readModeAP; this.readModeSC = other.readModeSC; this.replica = other.replica; @@ -305,7 +305,7 @@ public Policy(Policy other) /// public Policy() { - Tran = null; + Txn = null; } /// diff --git a/AerospikeClient/Policy/TranRollPolicy.cs b/AerospikeClient/Policy/TxnRollPolicy.cs similarity index 90% rename from AerospikeClient/Policy/TranRollPolicy.cs rename to AerospikeClient/Policy/TxnRollPolicy.cs index c123c2fa..e2a90034 100644 --- a/AerospikeClient/Policy/TranRollPolicy.cs +++ b/AerospikeClient/Policy/TxnRollPolicy.cs @@ -21,12 +21,12 @@ namespace Aerospike.Client /// Multi-record transaction (MRT) policy fields used to batch roll forward/backward records on /// commit or abort.Used a placeholder for now as there are no additional fields beyond BatchPolicy. /// - public sealed class TranRollPolicy : BatchPolicy + public sealed class TxnRollPolicy : BatchPolicy { /// /// Copy policy from another policy. /// - public TranRollPolicy(TranRollPolicy other) : + public TxnRollPolicy(TxnRollPolicy other) : base(other) { } @@ -34,7 +34,7 @@ public TranRollPolicy(TranRollPolicy other) : /// /// Default constructor. /// - public TranRollPolicy() + public TxnRollPolicy() { replica = Replica.MASTER; maxRetries = 5; diff --git a/AerospikeClient/Policy/TranVerifyPolicy.cs b/AerospikeClient/Policy/TxnVerifyPolicy.cs similarity index 90% rename from AerospikeClient/Policy/TranVerifyPolicy.cs rename to AerospikeClient/Policy/TxnVerifyPolicy.cs index e641ba33..4130fc98 100644 --- a/AerospikeClient/Policy/TranVerifyPolicy.cs +++ b/AerospikeClient/Policy/TxnVerifyPolicy.cs @@ -21,12 +21,12 @@ namespace Aerospike.Client /// Multi-record transaction (MRT) policy fields used to batch verify record versions on commit. /// Used a placeholder for now as there are no additional fields beyond BatchPolicy. ///
- public sealed class TranVerifyPolicy : BatchPolicy + public sealed class TxnVerifyPolicy : BatchPolicy { /// /// Copy policy from another policy. /// - public TranVerifyPolicy(TranVerifyPolicy other) : + public TxnVerifyPolicy(TxnVerifyPolicy other) : base(other) { } @@ -34,7 +34,7 @@ public TranVerifyPolicy(TranVerifyPolicy other) : /// /// Default constructor. /// - public TranVerifyPolicy() + public TxnVerifyPolicy() { readModeSC = ReadModeSC.LINEARIZE; replica = Replica.MASTER; diff --git a/AerospikeClient/Query/QueryExecutor.cs b/AerospikeClient/Query/QueryExecutor.cs index 44325952..0fc72986 100644 --- a/AerospikeClient/Query/QueryExecutor.cs +++ b/AerospikeClient/Query/QueryExecutor.cs @@ -45,7 +45,7 @@ public QueryExecutor(Cluster cluster, QueryPolicy policy, Statement statement, N // Initialize maximum number of nodes to query in parallel. this.maxConcurrentNodes = (policy.maxConcurrentNodes == 0 || policy.maxConcurrentNodes >= threads.Length) ? threads.Length : policy.maxConcurrentNodes; - cluster.AddTran(); + cluster.AddCommand(); } protected internal void InitializeThreads() diff --git a/AerospikeClient/Query/QueryListenerExecutor.cs b/AerospikeClient/Query/QueryListenerExecutor.cs index da72f62b..b215e835 100644 --- a/AerospikeClient/Query/QueryListenerExecutor.cs +++ b/AerospikeClient/Query/QueryListenerExecutor.cs @@ -29,7 +29,7 @@ public static void execute PartitionTracker tracker ) { - cluster.AddTran(); + cluster.AddCommand(); ulong taskId = statement.PrepareTaskId(); diff --git a/AerospikeClient/Query/QueryPartitionExecutor.cs b/AerospikeClient/Query/QueryPartitionExecutor.cs index 3c00b4ce..f970055b 100644 --- a/AerospikeClient/Query/QueryPartitionExecutor.cs +++ b/AerospikeClient/Query/QueryPartitionExecutor.cs @@ -51,7 +51,7 @@ PartitionTracker tracker this.cancel = new CancellationTokenSource(); this.tracker = tracker; this.recordSet = new RecordSet(this, policy.recordQueueSize, cancel.Token); - cluster.AddTran(); + cluster.AddCommand(); 
ThreadPool.UnsafeQueueUserWorkItem(this.Run, null); } diff --git a/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs b/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs index e19597df..2275256f 100644 --- a/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs +++ b/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs @@ -103,13 +103,13 @@ public class AerospikeClientProxy : IDisposable, IAerospikeClient /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. /// - public BatchPolicy tranVerifyPolicyDefault { get; set; } + public TxnVerifyPolicy txnVerifyPolicyDefault { get; set; } /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) /// or back(abort) in a batch. /// - public BatchPolicy tranRollPolicyDefault { get; set; } + public TxnRollPolicy txnRollPolicyDefault { get; set; } /// /// Default info policy that is used when info command policy is null. @@ -161,8 +161,8 @@ public AerospikeClientProxy(ClientPolicy policy, params Host[] hosts) this.batchWritePolicyDefault = policy.batchWritePolicyDefault; this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; - this.tranVerifyPolicyDefault = policy.tranVerifyPolicyDefault; - this.tranRollPolicyDefault = policy.tranRollPolicyDefault; + this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; + this.txnRollPolicyDefault = policy.txnRollPolicyDefault; this.infoPolicyDefault = policy.infoPolicyDefault; this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); @@ -320,20 +320,20 @@ public BatchUDFPolicy BatchUDFPolicyDefault /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. 
/// - public BatchPolicy TranVerifyPolicyDefault + public TxnVerifyPolicy TxnVerifyPolicyDefault { - get { return tranVerifyPolicyDefault; } - set { tranVerifyPolicyDefault = value; } + get { return txnVerifyPolicyDefault; } + set { txnVerifyPolicyDefault = value; } } /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) /// or back(abort) in a batch. /// - public BatchPolicy TranRollPolicyDefault + public TxnRollPolicy TxnRollPolicyDefault { - get { return tranRollPolicyDefault; } - set { tranRollPolicyDefault = value; } + get { return txnRollPolicyDefault; } + set { txnRollPolicyDefault = value; } } /// @@ -469,9 +469,9 @@ private string GetVersion() ///

///
/// multi-record transaction - public void Commit(Tran tran) + public CommitStatus.CommitStatusType Commit(Txn tran) { - + return CommitStatus.CommitStatusType.OK; } /// @@ -481,9 +481,9 @@ public void Commit(Tran tran) ///

///
/// multi-record transaction - public void Abort(Tran tran) + public AbortStatus.AbortStatusType Abort(Txn tran) { - + return AbortStatus.AbortStatusType.OK; } //------------------------------------------------------- diff --git a/AerospikeClientProxy/Proxy/AsyncClientProxy.cs b/AerospikeClientProxy/Proxy/AsyncClientProxy.cs index b9990631..b40b4cc5 100644 --- a/AerospikeClientProxy/Proxy/AsyncClientProxy.cs +++ b/AerospikeClientProxy/Proxy/AsyncClientProxy.cs @@ -127,10 +127,9 @@ public AsyncClientProxy(AsyncClientPolicy policy, params Host[] hosts) ///

///
/// where to send results - /// multi-record transaction - public void Commit(CommitListener listener, Tran tran) + /// multi-record transaction + public void Commit(CommitListener listener, Txn txn) { - } /// @@ -143,10 +142,9 @@ public void Commit(CommitListener listener, Tran tran) ///

///
/// where to send results - /// multi-record transaction - public void Abort(AbortListener listener, Tran tran) + /// multi-record transaction + public void Abort(AbortListener listener, Txn txn) { - } //------------------------------------------------------- diff --git a/AerospikeTest/Args.cs b/AerospikeTest/Args.cs index 40c4c28f..e534f3e2 100644 --- a/AerospikeTest/Args.cs +++ b/AerospikeTest/Args.cs @@ -43,6 +43,7 @@ public class Args public string clusterName; public string ns; public string set; + public bool useServicesAlternate; public string tlsName; public string proxyTlsName; public TlsPolicy tlsPolicy; @@ -70,6 +71,7 @@ public Args() ns = section.GetSection("Namespace").Value; set = section.GetSection("Set").Value; authMode = (AuthMode)Enum.Parse(typeof(AuthMode), section.GetSection("AuthMode").Value, true); + useServicesAlternate = bool.Parse(section.GetSection("UseServicesAlternate").Value); bool tlsEnable = bool.Parse(section.GetSection("TlsEnable").Value); @@ -130,6 +132,7 @@ private void ConnectSync() policy.tlsPolicy = tlsPolicy; policy.authMode = authMode; policy.timeout = timeout; + policy.useServicesAlternate = useServicesAlternate; if (user != null && user.Length > 0) { @@ -187,6 +190,11 @@ private void ConnectProxy() proxyAsyncPolicy.maxConnsPerNode = 100; proxyPolicy.timeout = timeout; proxyAsyncPolicy.timeout = timeout; + policy.useServicesAlternate = useServicesAlternate; + proxyPolicy.useServicesAlternate = useServicesAlternate; + asyncPolicy.useServicesAlternate = useServicesAlternate; + proxyAsyncPolicy.useServicesAlternate = useServicesAlternate; + if (user != null && user.Length > 0) { @@ -287,6 +295,7 @@ private void ConnectAsync() policy.authMode = authMode; policy.asyncMaxCommands = 300; policy.timeout = timeout; + policy.useServicesAlternate = useServicesAlternate; if (user != null && user.Length > 0) { diff --git a/AerospikeTest/Async/TestAsyncTxn.cs b/AerospikeTest/Async/TestAsyncTxn.cs new file mode 100644 index 
00000000..da53dadc --- /dev/null +++ b/AerospikeTest/Async/TestAsyncTxn.cs @@ -0,0 +1,492 @@ +/* + * Copyright 2012-2018 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Aerospike.Client; +using System.Reflection; +using System.Text; + +namespace Aerospike.Test +{ + [TestClass] + public class TestAsyncTxn : TestAsync + { + private static readonly string binName = "bin"; + + [ClassInitialize()] + public static void Prepare(TestContext testContext) + { + if (!args.testProxy || (args.testProxy && nativeClient != null)) + { + Assembly assembly = Assembly.GetExecutingAssembly(); + RegisterTask task = nativeClient.Register(null, assembly, "Aerospike.Test.LuaResources.record_example.lua", "record_example.lua", Language.LUA); + task.Wait(); + } + } + + [TestMethod] + public void AsyncTxnWrite() + { + Key key = new(args.ns, args.set, "asyncTxnWrite"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void AsyncTxnWriteTwice() + { + Key key = new(args.ns, args.set, "mrtkey2"); + + Txn txn = new(); + + WritePolicy wp 
= client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val1")); + client.Put(wp, key, new Bin(binName, "val2")); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void AsyncTxnWriteConflict() + { + Key key = new(args.ns, args.set, "mrtkey21"); + + Txn txn1 = new(); + Txn txn2 = new(); + + WritePolicy wp1 = client.WritePolicyDefault; + WritePolicy wp2 = client.WritePolicyDefault; + wp1.Txn = txn1; + wp2.Txn = txn2; + + client.Put(wp1, key, new Bin(binName, "val1")); + + try + { + client.Put(wp2, key, new Bin(binName, "val2")); + } + catch (AerospikeException ae) + { + if (ae.Result != ResultCode.MRT_BLOCKED) + { + throw ae; + } + } + + client.Commit(txn1); + client.Commit(txn2); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void AsyncTxnWriteBlock() + { + Key key = new(args.ns, args.set, "mrtkey3"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + try + { + // This write should be blocked. 
+ client.Put(null, key, new Bin(binName, "val3")); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException e) + { + if (e.Result != ResultCode.MRT_BLOCKED) + { + throw e; + } + } + + client.Commit(txn); + } + + [TestMethod] + public void AsyncTxnWriteRead() + { + Key key = new(args.ns, args.set, "mrtkey4"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + + client.Commit(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void AsyncTxnWriteAbort() + { + Key key = new(args.ns, args.set, "mrtkey5"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + Policy p = client.ReadPolicyDefault; + p.Txn = txn; + Record record = client.Get(p, key); + AssertBinEqual(key, record, binName, "val2"); + + client.Abort(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void AsyncTxnDelete() + { + Key key = new(args.ns, args.set, "mrtkey6"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + wp.durableDelete = true; + client.Delete(wp, key); + + client.Commit(txn); + + Record record = client.Get(null, key); + Assert.IsNull(record); + } + + [TestMethod] + public void AsyncTxnDeleteAbort() + { + Key key = new(args.ns, args.set, "mrtkey7"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + wp.durableDelete = true; + client.Delete(wp, key); + + client.Abort(txn); + + Record record = 
client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void AsyncTxnDeleteTwice() + { + Key key = new(args.ns, args.set, "mrtkey8"); + + Txn txn = new(); + + client.Put(null, key, new Bin(binName, "val1")); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + wp.durableDelete = true; + client.Delete(wp, key); + client.Delete(wp, key); + + client.Commit(txn); + + Record record = client.Get(null, key); + Assert.IsNull(record); + } + + [TestMethod] + public void AsyncTxnTouch() + { + Key key = new(args.ns, args.set, "mrtkey9"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Touch(wp, key); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void AsyncTxnTouchAbort() + { + Key key = new(args.ns, args.set, "mrtkey10"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Touch(wp, key); + + client.Abort(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void AsyncTxnOperateWrite() + { + Key key = new(args.ns, args.set, "mrtkey11"); + + client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + Record record = client.Operate(wp, key, + Operation.Put(new Bin(binName, "val2")), + Operation.Get("bin2") + ); + AssertBinEqual(key, record, "bin2", "bal1"); + + client.Commit(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void AsyncTxnOperateWriteAbort() + { + Key key = new(args.ns, args.set, "mrtkey12"); + + client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Txn txn 
= new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + Record record = client.Operate(wp, key, + Operation.Put(new Bin(binName, "val2")), + Operation.Get("bin2") + ); + AssertBinEqual(key, record, "bin2", "bal1"); + + client.Abort(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void AsyncTxnUDF() + { + Key key = new(args.ns, args.set, "mrtkey13"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void AsyncTxnUDFAbort() + { + Key key = new(args.ns, args.set, "mrtkey14"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + + client.Abort(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void AsyncTxnBatch() + { + Key[] keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Record[] recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + + Txn txn = new(); + + bin = new(binName, 2); + + BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.Txn = txn; + + BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + + if (!bresults.status) + { + StringBuilder sb = new StringBuilder(); + sb.Append("Batch failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in bresults.records) + { + if (br.resultCode == 0) + { + 
sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + + throw new AerospikeException(sb.ToString()); + } + + client.Commit(txn); + + recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 2); + } + + [TestMethod] + public void AsyncTxnBatchAbort() + { + var keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Record[] recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + + Txn txn = new(); + + bin = new Bin(binName, 2); + + BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.Txn = txn; + + BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + + if (!bresults.status) + { + StringBuilder sb = new StringBuilder(); + sb.Append("Batch failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in bresults.records) + { + if (br.resultCode == 0) + { + sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + + throw new AerospikeException(sb.ToString()); + } + + client.Abort(txn); + + recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + } + + private void AsyncAssertBatchEqual(Key[] keys, Record[] recs, int expected) + { + for (int i = 0; i < keys.Length; i++) + { + Key key = keys[i]; + Record rec = recs[i]; + + Assert.IsNotNull(rec); + + int received = rec.GetInt(binName); + Assert.AreEqual(expected, received); + } + } + } +} diff --git a/AerospikeTest/Sync/Basic/TestTran.cs b/AerospikeTest/Sync/Basic/TestTxn.cs similarity index 74% rename from AerospikeTest/Sync/Basic/TestTran.cs rename to AerospikeTest/Sync/Basic/TestTxn.cs index ab30ae82..3a3784fa 100644 --- a/AerospikeTest/Sync/Basic/TestTran.cs +++ b/AerospikeTest/Sync/Basic/TestTxn.cs @@ -22,7 +22,7 @@ namespace Aerospike.Test 
{ [TestClass] - public class TestTran : TestSync + public class TestTxn : TestSync { private static readonly string binName = "bin"; @@ -38,54 +38,54 @@ public static void Prepare(TestContext testContext) } [TestMethod] - public void TranWrite() + public void TxnWrite() { - Key key = new Key(args.ns, args.set, "mrtkey1"); + Key key = new(args.ns, args.set, "mrtkey1"); client.Put(null, key, new Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val2")); - client.Commit(tran); + client.Commit(txn); Record record = client.Get(null, key); AssertBinEqual(key, record, binName, "val2"); } [TestMethod] - public void TranWriteTwice() + public void TxnWriteTwice() { - Key key = new Key(args.ns, args.set, "mrtkey2"); + Key key = new(args.ns, args.set, "mrtkey2"); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val1")); client.Put(wp, key, new Bin(binName, "val2")); - client.Commit(tran); + client.Commit(txn); Record record = client.Get(null, key); AssertBinEqual(key, record, binName, "val2"); } [TestMethod] - public void tranWriteConflict() + public void TxnWriteConflict() { - Key key = new Key(args.ns, args.set, "mrtkey21"); + Key key = new(args.ns, args.set, "mrtkey21"); - Tran tran1 = new Tran(); - Tran tran2 = new Tran(); + Txn txn1 = new(); + Txn txn2 = new(); WritePolicy wp1 = client.WritePolicyDefault; WritePolicy wp2 = client.WritePolicyDefault; - wp1.Tran = tran1; - wp2.Tran = tran2; + wp1.Txn = txn1; + wp2.Txn = txn2; client.Put(wp1, key, new Bin(binName, "val1")); @@ -101,24 +101,24 @@ public void tranWriteConflict() } } - client.Commit(tran1); - client.Commit(tran2); + client.Commit(txn1); + client.Commit(txn2); Record record = client.Get(null, key); AssertBinEqual(key, record, binName, "val1"); } [TestMethod] - public 
void TranWriteBlock() + public void TxnWriteBlock() { - Key key = new Key(args.ns, args.set, "mrtkey3"); + Key key = new(args.ns, args.set, "mrtkey3"); client.Put(null, key, new Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val2")); try @@ -135,247 +135,247 @@ public void TranWriteBlock() } } - client.Commit(tran); + client.Commit(txn); } [TestMethod] - public void TranWriteRead() + public void TxnWriteRead() { - Key key = new Key(args.ns, args.set, "mrtkey4"); + Key key = new(args.ns, args.set, "mrtkey4"); client.Put(null, key, new Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val2")); Record record = client.Get(null, key); AssertBinEqual(key, record, binName, "val1"); - client.Commit(tran); + client.Commit(txn); record = client.Get(null, key); AssertBinEqual(key, record, binName, "val2"); } [TestMethod] - public void TranWriteAbort() + public void TxnWriteAbort() { - Key key = new Key(args.ns, args.set, "mrtkey5"); + Key key = new(args.ns, args.set, "mrtkey5"); client.Put(null, key, new Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val2")); Policy p = client.ReadPolicyDefault; - p.Tran = tran; + p.Txn = txn; Record record = client.Get(p, key); AssertBinEqual(key, record, binName, "val2"); - client.Abort(tran); + client.Abort(txn); record = client.Get(null, key); AssertBinEqual(key, record, binName, "val1"); } [TestMethod] - public void TranDelete() + public void TxnDelete() { - Key key = new Key(args.ns, args.set, "mrtkey6"); + Key key = new(args.ns, args.set, "mrtkey6"); client.Put(null, key, new Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = 
new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; wp.durableDelete = true; client.Delete(wp, key); - client.Commit(tran); + client.Commit(txn); Record record = client.Get(null, key); Assert.IsNull(record); } [TestMethod] - public void TranDeleteAbort() + public void TxnDeleteAbort() { - Key key = new Key(args.ns, args.set, "mrtkey7"); + Key key = new(args.ns, args.set, "mrtkey7"); client.Put(null, key, new Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; wp.durableDelete = true; client.Delete(wp, key); - client.Abort(tran); + client.Abort(txn); Record record = client.Get(null, key); AssertBinEqual(key, record, binName, "val1"); } [TestMethod] - public void TranDeleteTwice() + public void TxnDeleteTwice() { - Key key = new Key(args.ns, args.set, "mrtkey8"); + Key key = new(args.ns, args.set, "mrtkey8"); - Tran tran = new Tran(); + Txn txn = new(); client.Put(null, key, new Bin(binName, "val1")); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; wp.durableDelete = true; client.Delete(wp, key); client.Delete(wp, key); - client.Commit(tran); + client.Commit(txn); Record record = client.Get(null, key); Assert.IsNull(record); } [TestMethod] - public void TranTouch() + public void TxnTouch() { - Key key = new Key(args.ns, args.set, "mrtkey9"); + Key key = new(args.ns, args.set, "mrtkey9"); client.Put(null, key, new Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; client.Touch(wp, key); - client.Commit(tran); + client.Commit(txn); Record record = client.Get(null, key); AssertBinEqual(key, record, binName, "val1"); } [TestMethod] - public void TranTouchAbort() + public void TxnTouchAbort() { - Key key = new Key(args.ns, args.set, "mrtkey10"); + Key key = new(args.ns, args.set, "mrtkey10"); client.Put(null, key, new 
Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; client.Touch(wp, key); - client.Abort(tran); + client.Abort(txn); Record record = client.Get(null, key); AssertBinEqual(key, record, binName, "val1"); } [TestMethod] - public void TranOperateWrite() + public void TxnOperateWrite() { - Key key = new Key(args.ns, args.set, "mrtkey11"); + Key key = new(args.ns, args.set, "mrtkey11"); client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; Record record = client.Operate(wp, key, Operation.Put(new Bin(binName, "val2")), Operation.Get("bin2") ); AssertBinEqual(key, record, "bin2", "bal1"); - client.Commit(tran); + client.Commit(txn); record = client.Get(null, key); AssertBinEqual(key, record, binName, "val2"); } [TestMethod] - public void TranOperateWriteAbort() + public void TxnOperateWriteAbort() { - Key key = new Key(args.ns, args.set, "mrtkey12"); + Key key = new(args.ns, args.set, "mrtkey12"); client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; Record record = client.Operate(wp, key, Operation.Put(new Bin(binName, "val2")), Operation.Get("bin2") ); AssertBinEqual(key, record, "bin2", "bal1"); - client.Abort(tran); + client.Abort(txn); record = client.Get(null, key); AssertBinEqual(key, record, binName, "val1"); } [TestMethod] - public void TranUDF() + public void TxnUDF() { - Key key = new Key(args.ns, args.set, "mrtkey13"); + Key key = new(args.ns, args.set, "mrtkey13"); client.Put(null, key, new Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; client.Execute(wp, key, "record_example", 
"writeBin", Value.Get(binName), Value.Get("val2")); - client.Commit(tran); + client.Commit(txn); Record record = client.Get(null, key); AssertBinEqual(key, record, binName, "val2"); } [TestMethod] - public void TranUDFAbort() + public void TxnUDFAbort() { - Key key = new Key(args.ns, args.set, "mrtkey14"); + Key key = new(args.ns, args.set, "mrtkey14"); client.Put(null, key, new Bin(binName, "val1")); - Tran tran = new Tran(); + Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; - wp.Tran = tran; + wp.Txn = txn; client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); - client.Abort(tran); + client.Abort(txn); Record record = client.Get(null, key); AssertBinEqual(key, record, binName, "val1"); } [TestMethod] - public void TranBatch() + public void TxnBatch() { Key[] keys = new Key[10]; - Bin bin = new Bin(binName, 1); + Bin bin = new(binName, 1); for (int i = 0; i < keys.Length; i++) { - Key key = new Key(args.ns, args.set, i); + Key key = new(args.ns, args.set, i); keys[i] = key; client.Put(null, key, bin); @@ -384,12 +384,12 @@ public void TranBatch() Record[] recs = client.Get(null, keys); AssertBatchEqual(keys, recs, 1); - Tran tran = new Tran(); + Txn txn = new(); - bin = new Bin(binName, 2); + bin = new(binName, 2); BatchPolicy bp = BatchPolicy.WriteDefault(); - bp.Tran = tran; + bp.Txn = txn; BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); @@ -415,21 +415,21 @@ public void TranBatch() throw new AerospikeException(sb.ToString()); } - client.Commit(tran); + client.Commit(txn); recs = client.Get(null, keys); AssertBatchEqual(keys, recs, 2); } [TestMethod] - public void TranBatchAbort() + public void TxnBatchAbort() { - Key[] keys = new Key[10]; - Bin bin = new Bin(binName, 1); + var keys = new Key[10]; + Bin bin = new(binName, 1); for (int i = 0; i < keys.Length; i++) { - Key key = new Key(args.ns, args.set, i); + Key key = new(args.ns, args.set, i); keys[i] = key; client.Put(null, key, 
bin); @@ -438,12 +438,12 @@ public void TranBatchAbort() Record[] recs = client.Get(null, keys); AssertBatchEqual(keys, recs, 1); - Tran tran = new Tran(); + Txn txn = new(); bin = new Bin(binName, 2); BatchPolicy bp = BatchPolicy.WriteDefault(); - bp.Tran = tran; + bp.Txn = txn; BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); @@ -469,7 +469,7 @@ public void TranBatchAbort() throw new AerospikeException(sb.ToString()); } - client.Abort(tran); + client.Abort(txn); recs = client.Get(null, keys); AssertBatchEqual(keys, recs, 1); diff --git a/AerospikeTest/settings.json b/AerospikeTest/settings.json index 1f22cf00..7ca33c77 100644 --- a/AerospikeTest/settings.json +++ b/AerospikeTest/settings.json @@ -1,7 +1,7 @@ { "Host": "localhost", - "Port": 3000, - "ProxyHost": "localhost", + "Port": 3100, + "ProxyHost": "localhost", "ProxyPort": 4000, "TestProxy": false, "ClusterName": "", @@ -10,13 +10,14 @@ "User": "", "Password": "", "Timeout": 25000, + "UseServicesAlternate": true, "TlsEnable": false, "TlsName": "", "TlsProtocols": "", "TlsRevoke": "", "TlsClientCertFile": "", "TlsLoginOnly": false, - "ProxyTlsEnable": false, + "ProxyTlsEnable": true, "ProxyTlsName": "", "ProxyTlsProtocols": "", "ProxyTlsRevoke": "", From 69812d933f2a5f5fd885c377abf9c83b5ee88efb Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Wed, 28 Aug 2024 15:05:49 -0600 Subject: [PATCH 04/41] Go through reference again, fixing some bugs --- AerospikeClient/Async/AsyncBatch.cs | 4 +- AerospikeClient/Async/AsyncClient.cs | 27 +- AerospikeClient/Async/AsyncClientPolicy.cs | 2 +- AerospikeClient/Async/AsyncCommand.cs | 10 +- AerospikeClient/Async/AsyncConnector.cs | 2 +- AerospikeClient/Async/AsyncDelete.cs | 1 - AerospikeClient/Async/AsyncExists.cs | 1 + AerospikeClient/Async/AsyncOperateWrite.cs | 1 + .../Async/AsyncQueryPartitionExecutor.cs | 2 +- AerospikeClient/Async/AsyncReadBase.cs | 2 +- AerospikeClient/Async/AsyncReadHeader.cs | 2 +- 
.../Async/AsyncScanPartitionExecutor.cs | 2 +- AerospikeClient/Async/AsyncSingleCommand.cs | 10 +- AerospikeClient/Async/AsyncTxnAddKeys.cs | 2 +- .../Async/AsyncTxnMarkRollForward.cs | 2 + AerospikeClient/Async/AsyncTxnMonitor.cs | 24 +- AerospikeClient/Async/AsyncTxnRoll.cs | 8 +- AerospikeClient/Async/AsyncWrite.cs | 1 - AerospikeClient/Async/AsyncWriteBase.cs | 2 +- AerospikeClient/Async/IAsyncClient.cs | 28 +- .../BatchOperateListListenerAdapter.cs | 2 +- AerospikeClient/Cluster/Cluster.cs | 2 +- AerospikeClient/Cluster/ClusterStats.cs | 2 +- AerospikeClient/Command/Batch.cs | 4 +- AerospikeClient/Command/BatchAttr.cs | 4 +- AerospikeClient/Command/BatchExecutor.cs | 2 +- AerospikeClient/Command/ByteUtil.cs | 2 +- AerospikeClient/Command/Command.cs | 366 +++++++----- AerospikeClient/Command/DeleteCommand.cs | 2 +- AerospikeClient/Command/ExecuteCommand.cs | 5 - AerospikeClient/Command/OperateArgs.cs | 12 - AerospikeClient/Command/OperateCommandRead.cs | 2 +- .../Command/OperateCommandWrite.cs | 4 - AerospikeClient/Command/ReadCommand.cs | 10 +- AerospikeClient/Command/ScanExecutor.cs | 2 +- AerospikeClient/Command/SyncCommand.cs | 10 +- AerospikeClient/Command/SyncReadCommand.cs | 2 +- AerospikeClient/Command/SyncWriteCommand.cs | 2 +- AerospikeClient/Command/TouchCommand.cs | 1 - AerospikeClient/Command/TxnAddKeys.cs | 2 +- AerospikeClient/Command/TxnClose.cs | 8 +- AerospikeClient/Command/TxnMarkRollForward.cs | 8 +- AerospikeClient/Command/TxnMonitor.cs | 42 +- AerospikeClient/Command/TxnRoll.cs | 34 +- AerospikeClient/Command/WriteCommand.cs | 1 - AerospikeClient/Listener/AbortListener.cs | 8 +- AerospikeClient/Listener/CommitListener.cs | 2 +- AerospikeClient/Main/AerospikeClient.cs | 17 +- AerospikeClient/Main/AerospikeException.cs | 47 +- AerospikeClient/Main/CommitError.cs | 6 +- AerospikeClient/Main/CommitStatus.cs | 2 +- AerospikeClient/Main/IAerospikeClient.cs | 2 +- AerospikeClient/Main/Key.cs | 1 + AerospikeClient/Main/ResultCode.cs | 8 +- 
AerospikeClient/Metrics/LatencyBuckets.cs | 2 +- AerospikeClient/Policy/BatchDeletePolicy.cs | 4 +- AerospikeClient/Policy/BatchPolicy.cs | 4 +- AerospikeClient/Policy/BatchUDFPolicy.cs | 4 +- AerospikeClient/Policy/BatchWritePolicy.cs | 4 +- AerospikeClient/Policy/ClientPolicy.cs | 4 +- AerospikeClient/Policy/CommitLevel.cs | 2 +- AerospikeClient/Policy/Policy.cs | 39 +- AerospikeClient/Policy/QueryPolicy.cs | 4 + AerospikeClient/Policy/ScanPolicy.cs | 4 + AerospikeClient/Policy/WritePolicy.cs | 4 +- AerospikeClient/Query/QueryExecutor.cs | 2 +- .../Query/QueryListenerExecutor.cs | 2 +- .../Query/QueryPartitionExecutor.cs | 2 +- AerospikeClient/Query/RecordSet.cs | 2 +- AerospikeClient/Query/ResultSet.cs | 2 +- .../Proxy/AerospikeClientProxy.cs | 22 +- .../Proxy/AsyncClientProxy.cs | 12 +- AerospikeTest/Async/TestAsync.cs | 35 ++ AerospikeTest/Async/TestAsyncTxn.cs | 534 ++++++++++++------ AerospikeTest/settings.json | 4 +- 75 files changed, 834 insertions(+), 615 deletions(-) diff --git a/AerospikeClient/Async/AsyncBatch.cs b/AerospikeClient/Async/AsyncBatch.cs index 83586343..1d1a808e 100644 --- a/AerospikeClient/Async/AsyncBatch.cs +++ b/AerospikeClient/Async/AsyncBatch.cs @@ -1887,11 +1887,13 @@ public abstract class AsyncBatchExecutor : IBatchStatus private readonly bool hasResultCode; private bool error; public AsyncBatchCommand[] commands; + public AsyncCluster cluster; public AsyncBatchExecutor(AsyncCluster cluster, bool hasResultCode) { this.hasResultCode = hasResultCode; - cluster.AddCommand(); + this.cluster = cluster; + cluster.AddCommandCount(); } public void Execute() diff --git a/AerospikeClient/Async/AsyncClient.cs b/AerospikeClient/Async/AsyncClient.cs index da41e32c..1922f3d7 100644 --- a/AerospikeClient/Async/AsyncClient.cs +++ b/AerospikeClient/Async/AsyncClient.cs @@ -135,7 +135,7 @@ public AsyncClient(AsyncClientPolicy policy, params Host[] hosts) /// /// Asynchronously attempt to commit the given multi-record transaction. 
First, the expected /// record versions are sent to the server nodes for verification.If all nodes return success, - /// the transaction is committed.Otherwise, the transaction is aborted. + /// the transaction is committed. Otherwise, the transaction is aborted. /// /// This method registers the command with an event loop and returns. /// The event loop thread will process the command and send the results to the listener. @@ -188,7 +188,7 @@ public void Abort(AbortListener listener, Txn txn) /// Asynchronously write record bin(s). /// Create listener, call asynchronous put and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -203,13 +203,13 @@ public Task Put(WritePolicy policy, CancellationToken token, Key key, params Bin Put(policy, listener, key, bins); return listener.Task; } - + /// /// Asynchronously write record bin(s). /// Schedules the put command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -236,7 +236,7 @@ public void Put(WritePolicy policy, WriteListener listener, Key key, params Bin[ /// Asynchronously append bin string values to existing record bin values. /// Create listener, call asynchronous append and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. 
/// @@ -258,7 +258,7 @@ public Task Append(WritePolicy policy, CancellationToken token, Key key, params /// Schedule the append command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -282,7 +282,7 @@ public void Append(WritePolicy policy, WriteListener listener, Key key, params B /// Asynchronously prepend bin string values to existing record bin values. /// Create listener, call asynchronous prepend and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -304,7 +304,7 @@ public Task Prepend(WritePolicy policy, CancellationToken token, Key key, params /// Schedule the prepend command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -332,7 +332,7 @@ public void Prepend(WritePolicy policy, WriteListener listener, Key key, params /// Asynchronously add integer/double bin values to existing record bin values. /// Create listener, call asynchronous add and return task monitor. 
/// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// ///
@@ -353,7 +353,7 @@ public Task Add(WritePolicy policy, CancellationToken token, Key key, params Bin /// Schedule the add command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// ///
@@ -1009,7 +1009,10 @@ public void Get(BatchPolicy policy, RecordSequenceListener listener, Key[] keys, } policy.Txn?.SetNamespace(keys); - AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, binNames, null, Command.INFO1_READ, false); + int readAttr = (binNames == null || binNames.Length == 0)? + Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ; + + AsyncBatchGetSequenceExecutor executor = new(cluster, policy, listener, keys, binNames, null, readAttr, false); executor.Execute(); } @@ -1534,7 +1537,7 @@ public Task Execute(WritePolicy policy, CancellationToken token, Key key /// server package name where user defined function resides /// user defined function /// arguments passed in to user defined function - /// if transaction fails + /// if command fails public void Execute(WritePolicy policy, ExecuteListener listener, Key key, string packageName, string functionName, params Value[] functionArgs) { if (policy == null) diff --git a/AerospikeClient/Async/AsyncClientPolicy.cs b/AerospikeClient/Async/AsyncClientPolicy.cs index 339ebc4f..0ff626f8 100644 --- a/AerospikeClient/Async/AsyncClientPolicy.cs +++ b/AerospikeClient/Async/AsyncClientPolicy.cs @@ -86,7 +86,7 @@ public sealed class AsyncClientPolicy : ClientPolicy public int asyncMinConnsPerNode; /// - /// Maximum number of asynchronous connections allowed per server node. Transactions will go + /// Maximum number of asynchronous connections allowed per server node. Commands will go /// through retry logic and potentially fail with "ResultCode.NO_MORE_CONNECTIONS" if the maximum /// number of connections would be exceeded. /// diff --git a/AerospikeClient/Async/AsyncCommand.cs b/AerospikeClient/Async/AsyncCommand.cs index 8a70a0c0..a282f3a8 100644 --- a/AerospikeClient/Async/AsyncCommand.cs +++ b/AerospikeClient/Async/AsyncCommand.cs @@ -431,7 +431,7 @@ public void ReceiveComplete() { // Authentication failed. Session token probably expired. 
// Signal tend thread to perform node login, so future - // transactions do not fail. + // commands do not fail. node.SignalLogin(); // This is a rare event because the client tracks session @@ -724,7 +724,7 @@ public bool CheckTimeout() node?.AddTimeout(); // Notify user immediately in this timeout thread. - // Transaction thread will cleanup eventArgs. + // Command thread will cleanup eventArgs. NotifyFailure(new AerospikeException.Timeout(policy, true)); } return false; // Do not put back on timeout queue. @@ -746,7 +746,7 @@ public bool CheckTimeout() // Socket timeout has occurred. if (Interlocked.CompareExchange(ref state, FAIL_SOCKET_TIMEOUT, IN_PROGRESS) == IN_PROGRESS) { - // User will be notified in transaction thread and this timeout thread. + // User will be notified in command thread and this timeout thread. // Close connection. This will result in a socket error in the async callback thread // and a possible retry. if (node != null && conn != null) @@ -775,14 +775,14 @@ protected internal void Finish() } else if (status == FAIL_TOTAL_TIMEOUT) { - // Timeout thread closed connection, but transaction still completed. + // Timeout thread closed connection, but command still completed. // User has already been notified with timeout. Release buffer and return. ReleaseBuffer(); return; } else if (status == FAIL_SOCKET_TIMEOUT) { - // Timeout thread closed connection, but transaction still completed. + // Timeout thread closed connection, but command still completed. // User has not been notified of the timeout. Release buffer and let // OnSuccess() be called. ReleaseBuffer(); diff --git a/AerospikeClient/Async/AsyncConnector.cs b/AerospikeClient/Async/AsyncConnector.cs index 7bf07a6b..634dc0be 100644 --- a/AerospikeClient/Async/AsyncConnector.cs +++ b/AerospikeClient/Async/AsyncConnector.cs @@ -225,7 +225,7 @@ public void ReceiveComplete() { // Authentication failed. Session token probably expired. 
// Signal tend thread to perform node login, so future - // transactions do not fail. + // commands do not fail. node.SignalLogin(); // This is a rare event because the client tracks session diff --git a/AerospikeClient/Async/AsyncDelete.cs b/AerospikeClient/Async/AsyncDelete.cs index 99e79ea1..2cdb45f1 100644 --- a/AerospikeClient/Async/AsyncDelete.cs +++ b/AerospikeClient/Async/AsyncDelete.cs @@ -26,7 +26,6 @@ public AsyncDelete(AsyncCluster cluster, WritePolicy writePolicy, Key key, Delet : base(cluster, writePolicy, key) { this.listener = listener; - cluster.AddCommand(); } public AsyncDelete(AsyncDelete other) diff --git a/AerospikeClient/Async/AsyncExists.cs b/AerospikeClient/Async/AsyncExists.cs index e57daa30..4f1eea6a 100644 --- a/AerospikeClient/Async/AsyncExists.cs +++ b/AerospikeClient/Async/AsyncExists.cs @@ -47,6 +47,7 @@ protected internal override void WriteBuffer() protected internal override bool ParseResult() { ParseHeader(); + ParseFields(policy.Txn, key, false); if (resultCode == ResultCode.OK) { diff --git a/AerospikeClient/Async/AsyncOperateWrite.cs b/AerospikeClient/Async/AsyncOperateWrite.cs index 2e2093be..11e7209a 100644 --- a/AerospikeClient/Async/AsyncOperateWrite.cs +++ b/AerospikeClient/Async/AsyncOperateWrite.cs @@ -28,6 +28,7 @@ public sealed class AsyncOperateWrite : AsyncWriteBase public AsyncOperateWrite(AsyncCluster cluster, RecordListener listener, Key key, OperateArgs args) : base(cluster, args.writePolicy, key) { + this.listener = listener; this.args = args; } diff --git a/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs b/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs index 747c100c..f308117a 100644 --- a/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs +++ b/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs @@ -41,7 +41,7 @@ PartitionTracker tracker this.statement = statement; this.tracker = tracker; - cluster.AddCommand(); + cluster.AddCommandCount(); tracker.SleepBetweenRetries = 0; taskId = 
statement.PrepareTaskId(); QueryPartitions(); diff --git a/AerospikeClient/Async/AsyncReadBase.cs b/AerospikeClient/Async/AsyncReadBase.cs index 2229adba..4ac55d47 100644 --- a/AerospikeClient/Async/AsyncReadBase.cs +++ b/AerospikeClient/Async/AsyncReadBase.cs @@ -27,7 +27,7 @@ public AsyncReadBase(AsyncCluster cluster, Policy policy, Key key) { this.key = key; this.partition = Partition.Read(cluster, policy, key); - cluster.AddCommand(); + cluster.AddCommandCount(); } public AsyncReadBase(AsyncReadBase other) diff --git a/AerospikeClient/Async/AsyncReadHeader.cs b/AerospikeClient/Async/AsyncReadHeader.cs index cc75781a..d46df6ea 100644 --- a/AerospikeClient/Async/AsyncReadHeader.cs +++ b/AerospikeClient/Async/AsyncReadHeader.cs @@ -26,7 +26,7 @@ public AsyncReadHeader(AsyncCluster cluster, Policy policy, RecordListener liste : base(cluster, policy, key) { this.listener = listener; - cluster.AddCommand(); + cluster.AddCommandCount(); } public AsyncReadHeader(AsyncReadHeader other) diff --git a/AerospikeClient/Async/AsyncScanPartitionExecutor.cs b/AerospikeClient/Async/AsyncScanPartitionExecutor.cs index 120fd7e2..20cbc5c0 100644 --- a/AerospikeClient/Async/AsyncScanPartitionExecutor.cs +++ b/AerospikeClient/Async/AsyncScanPartitionExecutor.cs @@ -46,7 +46,7 @@ PartitionTracker tracker this.binNames = binNames; this.tracker = tracker; - cluster.AddCommand(); + cluster.AddCommandCount(); tracker.SleepBetweenRetries = 0; ScanPartitions(); } diff --git a/AerospikeClient/Async/AsyncSingleCommand.cs b/AerospikeClient/Async/AsyncSingleCommand.cs index bad45ec1..a1847403 100644 --- a/AerospikeClient/Async/AsyncSingleCommand.cs +++ b/AerospikeClient/Async/AsyncSingleCommand.cs @@ -51,9 +51,9 @@ protected void ParseHeader() dataOffset += Command.MSG_REMAINING_HEADER_SIZE; } - protected void ParseFields(Txn tran, Key key, bool hasWrite) + protected void ParseFields(Txn txn, Key key, bool hasWrite) { - if (tran == null) + if (txn == null) { SkipFields(fieldCount); return; @@ 
-85,11 +85,11 @@ protected void ParseFields(Txn tran, Key key, bool hasWrite) if (hasWrite) { - tran.OnWrite(key, version, resultCode); + txn.OnWrite(key, version, resultCode); } else { - tran.OnRead(key, version); + txn.OnRead(key, version); } } @@ -104,7 +104,7 @@ protected void SkipFields(int fieldCount) } } - protected void ParseTranDeadline(Txn txn) + protected void ParseTxnDeadline(Txn txn) { for (int i = 0; i < fieldCount; i++) { diff --git a/AerospikeClient/Async/AsyncTxnAddKeys.cs b/AerospikeClient/Async/AsyncTxnAddKeys.cs index 66cdfc68..3c472cc1 100644 --- a/AerospikeClient/Async/AsyncTxnAddKeys.cs +++ b/AerospikeClient/Async/AsyncTxnAddKeys.cs @@ -54,7 +54,7 @@ protected internal override void WriteBuffer() protected internal override bool ParseResult() { ParseHeader(); - ParseTranDeadline(policy.Txn); + ParseTxnDeadline(policy.Txn); if (resultCode == ResultCode.OK) { diff --git a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs index 6ebd03f4..c7ca4f46 100644 --- a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs +++ b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs @@ -56,6 +56,8 @@ protected internal override bool ParseResult() { ParseHeader(); + // BIN_EXISTS_ERROR is considered a success because it means a previous attempt already + // succeeded in notifying the server that the MRT will be rolled forward. if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) { return true; diff --git a/AerospikeClient/Async/AsyncTxnMonitor.cs b/AerospikeClient/Async/AsyncTxnMonitor.cs index f7f20c36..fea0e19a 100644 --- a/AerospikeClient/Async/AsyncTxnMonitor.cs +++ b/AerospikeClient/Async/AsyncTxnMonitor.cs @@ -42,9 +42,9 @@ public static void Execute(AsyncCluster cluster, WritePolicy policy, AsyncWriteB } // Add key to MRT monitor and then run original command. 
- Operation[] ops = TxnMonitor.GetTranOps(txn, cmdKey); + Operation[] ops = TxnMonitor.GetTxnOps(txn, cmdKey); SingleTxnMonitor stm = new(cluster, command); - stm.Execute(policy, ops); + stm.Execute(cluster, policy, ops); } public static void ExecuteBatch( @@ -61,9 +61,9 @@ Key[] keys } // Add write keys to MRT monitor and then run original command. - Operation[] ops = TxnMonitor.GetTranOps(policy.Txn, keys); + Operation[] ops = TxnMonitor.GetTxnOps(policy.Txn, keys); BatchTxnMonitor ate = new(executor); - ate.Execute(policy, ops); + ate.Execute(executor.cluster, policy, ops); } public static void ExecuteBatch( @@ -80,7 +80,7 @@ List records } // Add write keys to MRT monitor and then run original command. - Operation[] ops = TxnMonitor.GetTranOps(policy.Txn, records); + Operation[] ops = TxnMonitor.GetTxnOps(policy.Txn, records); if (ops == null) { @@ -90,7 +90,7 @@ List records } BatchTxnMonitor ate = new(executor); - ate.Execute(policy, ops); + ate.Execute(executor.cluster, policy, ops); } public sealed class SingleTxnMonitor : AsyncTxnMonitor @@ -143,17 +143,17 @@ private AsyncTxnMonitor(AsyncCommand command, AsyncCluster cluster) this.cluster = cluster; } - void Execute(Policy policy, Operation[] ops) + void Execute(AsyncCluster cluster, Policy policy, Operation[] ops) { - Key tranKey = TxnMonitor.GetTxnMonitorKey(policy.Txn); + Key txnKey = TxnMonitor.GetTxnMonitorKey(policy.Txn); WritePolicy wp = TxnMonitor.CopyTimeoutPolicy(policy); - ExecuteRecordListener tranListener = new(this); + ExecuteRecordListener txnListener = new(this); // Add write key(s) to MRT monitor. 
OperateArgs args = new(wp, null, null, ops); - AsyncTxnAddKeys tranCommand = new(cluster, tranListener, tranKey, args); - tranCommand.Execute(); + AsyncTxnAddKeys txnCommand = new(cluster, txnListener, txnKey, args); + txnCommand.Execute(); } private void NotifyFailure(AerospikeException ae) @@ -199,7 +199,7 @@ public void OnSuccess(Key key, Record record) public void OnFailure(AerospikeException ae) { - monitor.NotifyFailure(new AerospikeException(ResultCode.TRAN_FAILED, "Failed to add key(s) to MRT monitor", ae)); + monitor.NotifyFailure(new AerospikeException(ResultCode.TXN_FAILED, "Failed to add key(s) to MRT monitor", ae)); } } } diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs index 10bdad1a..bf3793ce 100644 --- a/AerospikeClient/Async/AsyncTxnRoll.cs +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -27,7 +27,7 @@ public sealed class AsyncTxnRoll private readonly BatchPolicy rollPolicy; private readonly WritePolicy writePolicy; private readonly Txn txn; - private readonly Key tranKey; + private readonly Key txnKey; private CommitListener commitListener; private AbortListener abortListener; private BatchRecord[] verifyRecords; @@ -47,7 +47,7 @@ Txn txn this.rollPolicy = rollPolicy; this.writePolicy = new WritePolicy(rollPolicy); this.txn = txn; - this.tranKey = TxnMonitor.GetTxnMonitorKey(txn); + this.txnKey = TxnMonitor.GetTxnMonitorKey(txn); } public void Commit(CommitListener listener) @@ -97,7 +97,7 @@ private void MarkRollForward() try { MarkRollForwardListener writeListener = new(this); - AsyncTxnMarkRollForward command = new(cluster, txn, writeListener, writePolicy, tranKey); + AsyncTxnMarkRollForward command = new(cluster, txn, writeListener, writePolicy, txnKey); command.Execute(); } catch (Exception t) @@ -187,7 +187,7 @@ private void CloseOnAbort() try { CloseOnAbortListener deleteListener = new(this); - AsyncTxnClose command = new(cluster, txn, deleteListener, writePolicy, tranKey); + AsyncTxnClose command = 
new(cluster, txn, deleteListener, writePolicy, txnKey); command.Execute(); } catch (Exception t) diff --git a/AerospikeClient/Async/AsyncWrite.cs b/AerospikeClient/Async/AsyncWrite.cs index 923a5dec..b3d8d972 100644 --- a/AerospikeClient/Async/AsyncWrite.cs +++ b/AerospikeClient/Async/AsyncWrite.cs @@ -36,7 +36,6 @@ Operation.Type operation this.listener = listener; this.bins = bins; this.operation = operation; - cluster.AddCommand(); } public AsyncWrite(AsyncWrite other) diff --git a/AerospikeClient/Async/AsyncWriteBase.cs b/AerospikeClient/Async/AsyncWriteBase.cs index 638951f1..157ade10 100644 --- a/AerospikeClient/Async/AsyncWriteBase.cs +++ b/AerospikeClient/Async/AsyncWriteBase.cs @@ -33,7 +33,7 @@ Key key this.writePolicy = writePolicy; this.Key = key; this.partition = Partition.Write(cluster, policy, key); - cluster.AddCommand(); + cluster.AddCommandCount(); } public AsyncWriteBase(AsyncWriteBase other) diff --git a/AerospikeClient/Async/IAsyncClient.cs b/AerospikeClient/Async/IAsyncClient.cs index 57172fcd..47a709d6 100644 --- a/AerospikeClient/Async/IAsyncClient.cs +++ b/AerospikeClient/Async/IAsyncClient.cs @@ -44,7 +44,7 @@ public interface IAsyncClient : IAerospikeClient /// /// Asynchronously attempt to commit the given multi-record transaction. First, the expected /// record versions are sent to the server nodes for verification.If all nodes return success, - /// the transaction is committed.Otherwise, the transaction is aborted. + /// the transaction is committed. Otherwise, the transaction is aborted. /// /// This method registers the command with an event loop and returns. /// The event loop thread will process the command and send the results to the listener. 
@@ -53,7 +53,7 @@ public interface IAsyncClient : IAerospikeClient /// /// /// where to send results - /// multi-record transaction + /// multi-record transaction void Commit(CommitListener listener, Txn txn); /// @@ -66,8 +66,8 @@ public interface IAsyncClient : IAerospikeClient /// /// /// - /// - void Abort(AbortListener listener, Txn tran); + /// + void Abort(AbortListener listener, Txn txn); //------------------------------------------------------- // Write Record Operations @@ -77,7 +77,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously write record bin(s). /// Create listener, call asynchronous put and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -93,7 +93,7 @@ public interface IAsyncClient : IAerospikeClient /// Schedules the put command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -112,7 +112,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously append bin string values to existing record bin values. /// Create listener, call asynchronous append and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -129,7 +129,7 @@ public interface IAsyncClient : IAerospikeClient /// Schedule the append command with a channel selector and return. 
/// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -145,7 +145,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously prepend bin string values to existing record bin values. /// Create listener, call asynchronous prepend and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -162,7 +162,7 @@ public interface IAsyncClient : IAerospikeClient /// Schedule the prepend command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -182,7 +182,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously add integer bin values to existing record bin values. /// Create listener, call asynchronous add and return task monitor. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for integer values. /// @@ -199,7 +199,7 @@ public interface IAsyncClient : IAerospikeClient /// Schedule the add command with a channel selector and return. 
/// Another thread will process the command and send the results to the listener. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -218,7 +218,7 @@ public interface IAsyncClient : IAerospikeClient /// Asynchronously delete record for specified key. /// Create listener, call asynchronous delete and return task monitor. /// - /// The policy specifies the transaction timeout. + /// The policy specifies the command timeout. /// /// /// delete configuration parameters, pass in null for defaults @@ -882,7 +882,7 @@ public interface IAsyncClient : IAerospikeClient /// server package name where user defined function resides /// user defined function /// arguments passed in to user defined function - /// if transaction fails + /// if command fails void Execute(WritePolicy policy, ExecuteListener listener, Key key, string packageName, string functionName, params Value[] functionArgs); /// diff --git a/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs b/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs index 6b2616fc..44c295f5 100644 --- a/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs +++ b/AerospikeClient/AsyncTask/BatchOperateListListenerAdapter.cs @@ -29,7 +29,7 @@ public BatchOperateListListenerAdapter(CancellationToken token) public void OnSuccess(List records, bool status) { // records is an argument to the async call, so the user already has access to it. - // Set completion status: true if all batch sub-transactions were successful. + // Set completion status: true if all batch sub-commands were successful. 
SetResult(status); } } diff --git a/AerospikeClient/Cluster/Cluster.cs b/AerospikeClient/Cluster/Cluster.cs index 9f8b5342..3614f6c9 100644 --- a/AerospikeClient/Cluster/Cluster.cs +++ b/AerospikeClient/Cluster/Cluster.cs @@ -1200,7 +1200,7 @@ private static bool SupportsPartitionQuery(Node[] nodes) /// /// Increment command count when metrics are enabled. /// - public void AddCommand() + public void AddCommandCount() { if (MetricsEnabled) { diff --git a/AerospikeClient/Cluster/ClusterStats.cs b/AerospikeClient/Cluster/ClusterStats.cs index 3aa19513..7e7f855f 100644 --- a/AerospikeClient/Cluster/ClusterStats.cs +++ b/AerospikeClient/Cluster/ClusterStats.cs @@ -51,7 +51,7 @@ public sealed class ClusterStats public readonly int invalidNodeCount; /// - /// Count of command retires since cluster was started. + /// Count of command retries since cluster was started. /// public readonly long RetryCount; diff --git a/AerospikeClient/Command/Batch.cs b/AerospikeClient/Command/Batch.cs index aec9cdf1..f034b103 100644 --- a/AerospikeClient/Command/Batch.cs +++ b/AerospikeClient/Command/Batch.cs @@ -515,14 +515,14 @@ public BatchTxnVerify( Cluster cluster, BatchNode batch, BatchPolicy batchPolicy, - Txn tran, + Txn txn, Key[] keys, long[] versions, BatchRecord[] records, BatchStatus status ) : base(cluster, batch, batchPolicy, status, false) { - this.txn = tran; + this.txn = txn; this.keys = keys; this.versions = versions; this.records = records; diff --git a/AerospikeClient/Command/BatchAttr.cs b/AerospikeClient/Command/BatchAttr.cs index 723a6880..b58f03bb 100644 --- a/AerospikeClient/Command/BatchAttr.cs +++ b/AerospikeClient/Command/BatchAttr.cs @@ -22,7 +22,7 @@ public sealed class BatchAttr public int readAttr; public int writeAttr; public int infoAttr; - public int tranAttr; + public int txnAttr; public int expiration; public int opSize; public short generation; @@ -415,7 +415,7 @@ public void SetTxn(int attr) readAttr = 0; writeAttr = Command.INFO2_WRITE | 
Command.INFO2_RESPOND_ALL_OPS | Command.INFO2_DURABLE_DELETE; infoAttr = 0; - tranAttr = attr; + txnAttr = attr; expiration = 0; generation = 0; hasWrite = true; diff --git a/AerospikeClient/Command/BatchExecutor.cs b/AerospikeClient/Command/BatchExecutor.cs index 6da7f1ca..59c4cb5d 100644 --- a/AerospikeClient/Command/BatchExecutor.cs +++ b/AerospikeClient/Command/BatchExecutor.cs @@ -23,7 +23,7 @@ public sealed class BatchExecutor { public static void Execute(Cluster cluster, BatchPolicy policy, BatchCommand[] commands, BatchStatus status) { - cluster.AddCommand(); + cluster.AddCommandCount(); if (policy.maxConcurrentThreads == 1 || commands.Length <= 1) { diff --git a/AerospikeClient/Command/ByteUtil.cs b/AerospikeClient/Command/ByteUtil.cs index c01faea0..456fcd1d 100644 --- a/AerospikeClient/Command/ByteUtil.cs +++ b/AerospikeClient/Command/ByteUtil.cs @@ -41,7 +41,7 @@ public static Value BytesToKeyValue(ParticleType type, byte[] buf, int offset, i case ParticleType.BLOB: byte[] dest = new byte[len]; Array.Copy(buf, offset, dest, 0, len); - return Value.Get(dest); + return Value.Get(dest); default: return null; diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs index 9940f7d5..f5ca7ac6 100644 --- a/AerospikeClient/Command/Command.cs +++ b/AerospikeClient/Command/Command.cs @@ -34,7 +34,7 @@ public abstract class Command public static readonly int INFO2_DELETE = (1 << 1); // Fling a record into the belly of Moloch. public static readonly int INFO2_GENERATION = (1 << 2); // Update if expected generation == old. public static readonly int INFO2_GENERATION_GT = (1 << 3); // Update if new generation >= old, good for restore. - public static readonly int INFO2_DURABLE_DELETE = (1 << 4); // Transaction resulting in record deletion leaves tombstone (Enterprise only). + public static readonly int INFO2_DURABLE_DELETE = (1 << 4); // Command resulting in record deletion leaves tombstone (Enterprise only). 
public static readonly int INFO2_CREATE_ONLY = (1 << 5); // Create only. Fail if record already exists. public static readonly int INFO2_RELAX_AP_LONG_QUERY = (1 << 6); // Treat as long query, but relac read consistency public static readonly int INFO2_RESPOND_ALL_OPS = (1 << 7); // Return a result for every operation. @@ -122,7 +122,7 @@ public void SetTxnAddKeys(WritePolicy policy, Key key, OperateArgs args) Begin(); int fieldCount = EstimateKeySize(key); dataOffset += args.size; - WriteTranMonitor(key, args.readAttr, args.writeAttr, fieldCount, args.operations.Length); + WriteTxnMonitor(key, args.readAttr, args.writeAttr, fieldCount, args.operations.Length); foreach (Operation operation in args.operations) { @@ -131,7 +131,7 @@ public void SetTxnAddKeys(WritePolicy policy, Key key, OperateArgs args) End(policy.compress); } - public void SetTranVerify(Txn tran, Key key, long ver) + public void SetTxnVerify(Txn txn, Key key, long ver) { Begin(); int fieldCount = EstimateKeySize(key); @@ -162,19 +162,19 @@ public void SetTranVerify(Txn tran, Key key, long ver) public void SetBatchTxnVerify( BatchPolicy policy, - Txn tran, + Txn txn, Key[] keys, long[] versions, BatchNode batch ) { BatchOffsetsNative offsets = new(batch); - SetBatchTranVerify(policy, tran, keys, versions, offsets); + SetBatchTxnVerify(policy, txn, keys, versions, offsets); } - public void SetBatchTranVerify( + public void SetBatchTxnVerify( BatchPolicy policy, - Txn tran, + Txn txn, Key[] keys, long[] versions, BatchOffsets offsets @@ -186,7 +186,8 @@ BatchOffsets offsets // Batch field dataOffset += FIELD_HEADER_SIZE + 5; - Key prev = null; + Key keyPrev = null; + long? verPrev = null; int max = offsets.Size(); for (int i = 0; i < max; i++) @@ -197,7 +198,7 @@ BatchOffsets offsets dataOffset += key.digest.Length + 4; - if (CanRepeat(key, prev, ver)) + if (CanRepeat(key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. 
dataOffset++; @@ -205,15 +206,16 @@ BatchOffsets offsets else { // Write full header and namespace/set/bin names. - dataOffset += 9; // header(4) + fieldCount(2) + opCount(2) = 9 + dataOffset += 9; // header(4) + info4(1) + fieldCount(2) + opCount(2) = 9 dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - if (ver != null) + if (ver.HasValue) { dataOffset += 7 + FIELD_HEADER_SIZE; } - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -225,9 +227,10 @@ BatchOffsets offsets WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset+= 4; + dataOffset += 4; dataBuffer[dataOffset++] = GetBatchFlags(policy); - prev = null; + keyPrev = null; + verPrev = null; for (int i = 0; i < max; i++) { @@ -242,7 +245,7 @@ BatchOffsets offsets Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); dataOffset += digest.Length; - if (CanRepeat(key, prev, ver)) + if (CanRepeat(key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. 
dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; @@ -270,7 +273,8 @@ BatchOffsets offsets WriteFieldVersion(ver.Value); } - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -279,24 +283,24 @@ BatchOffsets offsets End(compress); } - public void SetTxnMarkRollForward(Txn tran, Key key) + public void SetTxnMarkRollForward(Txn txn, Key key) { Bin bin = new("fwd", true); Begin(); int fieldCount = EstimateKeySize(key); EstimateOperationSize(bin); - WriteTranMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1); + WriteTxnMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1); WriteOperation(bin, Operation.Type.WRITE); End(); } - public void SetTranRoll(Key key, Txn tran, int tranAttr) + public void SetTxnRoll(Key key, Txn txn, int txnAttr) { Begin(); int fieldCount = EstimateKeySize(key); - fieldCount += SizeTran(key, tran, false); + fieldCount += SizeTxn(key, txn, false); SizeBuffer(); dataOffset += 8; @@ -304,7 +308,7 @@ public void SetTranRoll(Key key, Txn tran, int tranAttr) dataBuffer[dataOffset++] = (byte)0; dataBuffer[dataOffset++] = (byte)(Command.INFO2_WRITE | Command.INFO2_DURABLE_DELETE); dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)tranAttr; + dataBuffer[dataOffset++] = (byte)txnAttr; dataBuffer[dataOffset++] = 0; // clear the result code dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); @@ -313,7 +317,7 @@ public void SetTranRoll(Key key, Txn tran, int tranAttr) dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); WriteKey(key); - WriteTran(tran, false); + WriteTxn(txn, false); End(); } @@ -325,10 +329,10 @@ BatchAttr attr ) { BatchOffsetsNative offsets = new(batch); - SetBatchTranRoll(policy, keys, attr, offsets); + SetBatchTxnRoll(policy, keys, attr, offsets); } - public void SetBatchTranRoll( + public void SetBatchTxnRoll( BatchPolicy policy, Key[] keys, BatchAttr attr, @@ -339,20 +343,21 @@ BatchOffsets offsets Begin(); int fieldCount = 1; int max = 
offsets.Size(); - Txn tran = policy.Txn; + Txn txn = policy.Txn; long?[] versions = new long?[max]; for (int i = 0; i < max; i++) { int offset = offsets.Get(i); Key key = keys[offset]; - versions[i] = tran.GetReadVersion(key); + versions[i] = txn.GetReadVersion(key); } // Batch field dataOffset += FIELD_HEADER_SIZE + 5; - Key prev = null; + Key keyPrev = null; + long? verPrev = null; for (int i = 0; i < max; i++) { @@ -362,7 +367,7 @@ BatchOffsets offsets dataOffset += key.digest.Length + 4; - if (CanRepeat(key, prev, ver)) + if (CanRepeat(key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataOffset++; @@ -370,12 +375,13 @@ BatchOffsets offsets else { // Write full header and namespace/set/bin names. - dataOffset += 13; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 13 + dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - SizeTranBatch(tran, ver); + SizeTxnBatch(txn, ver); dataOffset += 2; // gen(2) = 2 - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -389,7 +395,8 @@ BatchOffsets offsets ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); dataOffset += 4; dataBuffer[dataOffset++] = GetBatchFlags(policy); - prev = null; + keyPrev = null; + verPrev = null; for (int i = 0; i < max; i++) { @@ -404,7 +411,7 @@ BatchOffsets offsets Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); dataOffset += digest.Length; - if (CanRepeat(key, prev, ver)) + if (CanRepeat(key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; @@ -412,8 +419,9 @@ BatchOffsets offsets else { // Write full message. 
- WriteBatchWrite(key, tran, ver, attr, null, 0, 0); - prev = key; + WriteBatchWrite(key, txn, ver, attr, null, 0, 0); + keyPrev = key; + verPrev = ver; } } @@ -422,16 +430,16 @@ BatchOffsets offsets End(compress); } - public void SetTxnClose(Txn tran, Key key) + public void SetTxnClose(Txn txn, Key key) { Begin(); int fieldCount = EstimateKeySize(key); - WriteTranMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE, + WriteTxnMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE, fieldCount, 0); End(); } - private void WriteTranMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) + private void WriteTxnMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) { SizeBuffer(); dataOffset += 8; @@ -470,7 +478,7 @@ public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key k EstimateOperationSize(bin); } - bool compress = SizeBuffer(policy); // TODO this is different from java + bool compress = SizeBuffer(policy); WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.Length); WriteKey(policy, key, true); @@ -1121,10 +1129,10 @@ public void SetBatchOperate( { Begin(); int max = offsets.Size(); - Txn tran = policy.Txn; + Txn txn = policy.Txn; long?[] versions = null; - if (tran != null) + if (txn != null) { versions = new long?[max]; @@ -1132,7 +1140,7 @@ public void SetBatchOperate( { int offset = offsets.Get(i); BatchRecord record = (BatchRecord)records[offset]; - versions[i] = tran.GetReadVersion(record.key); + versions[i] = txn.GetReadVersion(record.key); } } @@ -1147,6 +1155,7 @@ public void SetBatchOperate( dataOffset += FIELD_HEADER_SIZE + 5; BatchRecord prev = null; + long? 
verPrev = null; for (int i = 0; i < max; i++) { @@ -1157,7 +1166,7 @@ public void SetBatchOperate( dataOffset += key.digest.Length + 4; - if (CanRepeat(policy, key, record, prev, ver)) + if (CanRepeat(policy, key, record, prev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataOffset++; @@ -1165,12 +1174,13 @@ public void SetBatchOperate( else { // Estimate full header, namespace and bin names. - dataOffset += 13; + dataOffset += 12; dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - SizeTranBatch(tran, ver); + SizeTxnBatch(txn, ver); dataOffset += record.Size(policy); prev = record; + verPrev = ver; } } bool compress = SizeBuffer(policy); @@ -1188,12 +1198,13 @@ public void SetBatchOperate( BatchAttr attr = new(); prev = null; + verPrev = null; for (int i = 0; i < max; i++) { int offset = offsets.Get(i); BatchRecord record = (BatchRecord)records[offset]; - long? ver = (versions != null) ? versions[i] : null; + long? ver = versions?[i]; ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); dataOffset += 4; @@ -1203,7 +1214,7 @@ public void SetBatchOperate( dataOffset += digest.Length; - if (CanRepeat(policy, key, record, prev, ver)) + if (CanRepeat(policy, key, record, prev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. 
dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; @@ -1230,23 +1241,23 @@ public void SetBatchOperate( { if (br.binNames.Length > 0) { - WriteBatchBinNames(key, tran, ver, br.binNames, attr, attr.filterExp); + WriteBatchBinNames(key, txn, ver, br.binNames, attr, attr.filterExp); } else { attr.AdjustRead(true); - WriteBatchRead(key, tran, ver, attr, attr.filterExp, 0); + WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0); } } else if (br.ops != null) { attr.AdjustRead(br.ops); - WriteBatchOperations(key, tran, ver, br.ops, attr, attr.filterExp); + WriteBatchOperations(key, txn, ver, br.ops, attr, attr.filterExp); } else { attr.AdjustRead(br.readAllBins); - WriteBatchRead(key, tran, ver, attr, attr.filterExp, 0); + WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0); } break; } @@ -1264,7 +1275,7 @@ public void SetBatchOperate( attr.SetWrite(policy); } attr.AdjustWrite(bw.ops); - WriteBatchOperations(key, tran, ver, bw.ops, attr, attr.filterExp); + WriteBatchOperations(key, txn, ver, bw.ops, attr, attr.filterExp); break; } @@ -1299,11 +1310,12 @@ public void SetBatchOperate( { attr.SetDelete(policy); } - WriteBatchWrite(key, tran, ver, attr, attr.filterExp, 0, 0); + WriteBatchWrite(key, txn, ver, attr, attr.filterExp, 0, 0); break; } } prev = record; + verPrev = ver; } } @@ -1337,12 +1349,12 @@ BatchOffsets offsets { // Estimate full row size int max = offsets.Size(); - Txn tran = policy.Txn; + Txn txn = policy.Txn; long?[] versions = null; Begin(); - if (tran != null) + if (txn != null) { versions = new long?[max]; @@ -1350,7 +1362,7 @@ BatchOffsets offsets { int offset = offsets.Get(i); Key key = keys[offset]; - versions[i] = tran.GetReadVersion(key); + versions[i] = txn.GetReadVersion(key); } } @@ -1365,7 +1377,8 @@ BatchOffsets offsets dataOffset += FIELD_HEADER_SIZE + 5; - Key prev = null; + Key keyPrev = null; + long? 
verPrev = null; for (int i = 0; i < max; i++) { @@ -1375,7 +1388,7 @@ BatchOffsets offsets dataOffset += key.digest.Length + 4; - if (CanRepeat(key, prev, attr, ver)) + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataOffset++; @@ -1383,10 +1396,10 @@ BatchOffsets offsets else { // Write full header and namespace/set/bin names. - dataOffset += 13; // header(5) + ttl(4) + fieldCount(2) + opCount(2) = 13 + dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - SizeTranBatch(tran, ver); + SizeTxnBatch(txn, ver); if (attr.sendKey) { @@ -1419,7 +1432,8 @@ BatchOffsets offsets { dataOffset += 2; // Extra write specific fields. } - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -1435,7 +1449,8 @@ BatchOffsets offsets ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); dataOffset += 4; dataBuffer[dataOffset++] = GetBatchFlags(policy); - prev = null; + keyPrev = null; + verPrev = null; for (int i = 0; i < max; i++) { @@ -1450,7 +1465,7 @@ BatchOffsets offsets Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); dataOffset += digest.Length; - if (CanRepeat(key, prev, attr, ver)) + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; @@ -1460,21 +1475,22 @@ BatchOffsets offsets // Write full message. 
if (binNames != null) { - WriteBatchBinNames(key, tran, ver, binNames, attr, null); + WriteBatchBinNames(key, txn, ver, binNames, attr, null); } else if (ops != null) { - WriteBatchOperations(key, tran, ver, ops, attr, null); + WriteBatchOperations(key, txn, ver, ops, attr, null); } else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) { - WriteBatchWrite(key, tran, ver, attr, null, 0, 0); + WriteBatchWrite(key, txn, ver, attr, null, 0, 0); } else { - WriteBatchRead(key, tran, ver, attr, null, 0); + WriteBatchRead(key, txn, ver, attr, null, 0); } - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -1511,10 +1527,10 @@ BatchOffsets offsets // Estimate buffer size. Begin(); int max = offsets.Size(); - Txn tran = policy.Txn; + Txn txn = policy.Txn; long?[] versions = null; - if (tran != null) + if (txn != null) { versions = new long?[max]; @@ -1522,7 +1538,7 @@ BatchOffsets offsets { int offset = offsets.Get(i); Key key = keys[offset]; - versions[i] = tran.GetReadVersion(key); + versions[i] = txn.GetReadVersion(key); } } @@ -1537,7 +1553,8 @@ BatchOffsets offsets dataOffset += FIELD_HEADER_SIZE + 5; - Key prev = null; + Key keyPrev = null; + long? verPrev = null; for (int i = 0; i < max; i++) { @@ -1547,7 +1564,7 @@ BatchOffsets offsets dataOffset += key.digest.Length + 4; - if (CanRepeat(key, prev, attr, ver)) + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataOffset++; @@ -1555,10 +1572,10 @@ BatchOffsets offsets else { // Write full header and namespace/set/bin names. 
- dataOffset += 13; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 13 + dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - SizeTranBatch(tran, ver); + SizeTxnBatch(txn, ver); if (attr.sendKey) { @@ -1566,7 +1583,8 @@ BatchOffsets offsets } dataOffset += 2; // gen(2) = 2 EstimateUdfSize(packageName, functionName, argBytes); - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -1582,7 +1600,8 @@ BatchOffsets offsets ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); dataOffset += 4; dataBuffer[dataOffset++] = GetBatchFlags(policy); - prev = null; + keyPrev = null; + verPrev = null; for (int i = 0; i < max; i++) { @@ -1597,7 +1616,7 @@ BatchOffsets offsets Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); dataOffset += digest.Length; - if (CanRepeat(key, prev, attr, ver)) + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; @@ -1605,11 +1624,12 @@ BatchOffsets offsets else { // Write full message. - WriteBatchWrite(key, tran, ver, attr, null, 3, 0); + WriteBatchWrite(key, txn, ver, attr, null, 3, 0); WriteField(packageName, FieldType.UDF_PACKAGE_NAME); WriteField(functionName, FieldType.UDF_FUNCTION); WriteField(argBytes, FieldType.UDF_ARGLIST); - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -1623,27 +1643,29 @@ private static bool CanRepeat( Key key, BatchRecord record, BatchRecord prev, - long? ver + long? ver, + long? verPrev ) { // Avoid relatively expensive full equality checks for performance reasons. // Use reference equality only in hope that common namespaces/bin names are set from // fixed variables. It's fine if equality not determined correctly because it just // results in more space used. The batch will still be correct. 
- return ver == null && !policy.sendKey && prev != null && prev.key.ns == key.ns && + // Same goes for ver reference equality check. + return !policy.sendKey && verPrev == ver && prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName && record.Equals(prev); } - private static bool CanRepeat(Key key, Key prev, BatchAttr attr, long? ver) + private static bool CanRepeat(BatchAttr attr, Key key, Key keyPrev, long? ver, long? verPrev) { - return ver == null && !attr.sendKey && prev != null && prev.ns == key.ns && - prev.setName == key.setName; + return !attr.sendKey && verPrev == ver && keyPrev != null && keyPrev.ns == key.ns && + keyPrev.setName == key.setName; } - private static bool CanRepeat(Key key, Key prev, long? ver) + private static bool CanRepeat(Key key, Key keyPrev, long? ver, long? verPrev) { - return ver == null && prev != null && prev.ns == key.ns && - prev.setName == key.setName; + return verPrev == ver && keyPrev != null && keyPrev.ns == key.ns && + keyPrev.setName == key.setName; } private static Expression GetBatchExpression(Policy policy, BatchAttr attr) @@ -1672,18 +1694,19 @@ private static byte GetBatchFlags(BatchPolicy policy) return flags; } - private void SizeTranBatch(Txn tran, long? ver) + private void SizeTxnBatch(Txn txn, long? ver) { - if (tran != null) + if (txn != null) { + dataOffset++; // Add info4 byte for MRT. dataOffset += 8 + FIELD_HEADER_SIZE; - if (ver != null) + if (ver.HasValue) { dataOffset += 7 + FIELD_HEADER_SIZE; } - if (tran.Deadline != 0) + if (txn.Deadline != 0) { dataOffset += 4 + FIELD_HEADER_SIZE; } @@ -1703,17 +1726,19 @@ private void WriteBatchHeader(Policy policy, int timeout, int fieldCount) dataOffset += 8; dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)0; - Array.Clear(dataBuffer, dataOffset, 12); - dataOffset += 12; + Array.Clear(dataBuffer, dataOffset, 10); + dataOffset += 10; dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); } - private void WriteBatchBinNames(Key key, Txn tran, long? ver, string[] binNames, BatchAttr attr, Expression filter) + private void WriteBatchBinNames(Key key, Txn txn, long? ver, string[] binNames, BatchAttr attr, Expression filter) { - WriteBatchRead(key, tran, ver, attr, filter, binNames.Length); + WriteBatchRead(key, txn, ver, attr, filter, binNames.Length); foreach (string binName in binNames) { @@ -1721,15 +1746,15 @@ private void WriteBatchBinNames(Key key, Txn tran, long? ver, string[] binNames, } } - private void WriteBatchOperations(Key key, Txn tran, long? ver, Operation[] ops, BatchAttr attr, Expression filter) + private void WriteBatchOperations(Key key, Txn txn, long? ver, Operation[] ops, BatchAttr attr, Expression filter) { if (attr.hasWrite) { - WriteBatchWrite(key, tran, ver, attr, filter, 0, ops.Length); + WriteBatchWrite(key, txn, ver, attr, filter, 0, ops.Length); } else { - WriteBatchRead(key, tran, ver, attr, filter, ops.Length); + WriteBatchRead(key, txn, ver, attr, filter, ops.Length); } foreach (Operation op in ops) @@ -1738,44 +1763,72 @@ private void WriteBatchOperations(Key key, Txn tran, long? ver, Operation[] ops, } } - private void WriteBatchRead(Key key, Txn tran, long? ver, BatchAttr attr, Expression filter, int opCount) + private void WriteBatchRead(Key key, Txn txn, long? 
ver, BatchAttr attr, Expression filter, int opCount) { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - dataBuffer[dataOffset++] = (byte)attr.tranAttr; - ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); - dataOffset += 4; - WriteBatchFields(key, tran, ver, attr,filter, 0, opCount); + if (txn != null) + { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = (byte)attr.txnAttr; + ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + WriteBatchFieldsTxn(key, txn, ver, attr, filter, 0, opCount); + } + else + { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + WriteBatchFieldsReg(key, attr, filter, 0, opCount); + } } - private void WriteBatchWrite(Key key, Txn tran, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) + private void WriteBatchWrite(Key key, Txn txn, long? 
ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - dataBuffer[dataOffset++] = (byte)attr.tranAttr; - ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); - dataOffset += 2; - ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); - dataOffset += 4; - WriteBatchFields(key, tran, ver, attr, filter, fieldCount, opCount); + if (txn != null) + { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = (byte)attr.txnAttr; + ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); + dataOffset += 2; + ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + WriteBatchFieldsTxn(key, txn, ver, attr, filter, fieldCount, opCount); + } + else + { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); + dataOffset += 2; + ByteUtil.ShortToBytes((ushort)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + WriteBatchFieldsReg(key, attr, filter, fieldCount, opCount); + } } - private void WriteBatchFields(Key key, Txn tran, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) + private void WriteBatchFieldsTxn(Key key, Txn txn, long? 
ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) { - if (tran != null) + if (txn != null) { fieldCount++; - if (ver != null) + if (ver.HasValue) { fieldCount++; } - if (attr.hasWrite && tran.Deadline != 0) + if (attr.hasWrite && txn.Deadline != 0) { fieldCount++; } @@ -1793,19 +1846,16 @@ private void WriteBatchFields(Key key, Txn tran, long? ver, BatchAttr attr, Expr WriteBatchFields(key, fieldCount, opCount); - if (tran != null) - { - WriteFieldLE(tran.Id, FieldType.MRT_ID); + WriteFieldLE(txn.Id, FieldType.MRT_ID); - if (ver.HasValue) - { - WriteFieldVersion(ver.Value); - } + if (ver.HasValue) + { + WriteFieldVersion(ver.Value); + } - if (attr.hasWrite && tran.Deadline != 0) - { - WriteFieldLE(tran.Deadline, FieldType.MRT_DEADLINE); - } + if (attr.hasWrite && txn.Deadline != 0) + { + WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE); } filter?.Write(this); @@ -1816,6 +1866,30 @@ private void WriteBatchFields(Key key, Txn tran, long? ver, BatchAttr attr, Expr } } + private void WriteBatchFieldsReg( + Key key, + BatchAttr attr, + Expression filter, + int fieldCount, + int opCount + ) { + if (filter != null) { + fieldCount++; + } + + if (attr.sendKey) { + fieldCount++; + } + + WriteBatchFields(key, fieldCount, opCount); + + filter?.Write(this); + + if (attr.sendKey) { + WriteField(key.userKey, FieldType.KEY); + } + } + private void WriteBatchFields(Key key, int fieldCount, int opCount) { fieldCount += 2; @@ -2356,7 +2430,7 @@ private int EstimateKeySize(Policy policy, Key key, bool sendDeadline) { int fieldCount = EstimateKeySize(key); - fieldCount += SizeTran(key, policy.Txn, sendDeadline); + fieldCount += SizeTxn(key, policy.Txn, sendDeadline); if (policy.sendKey) { @@ -2737,7 +2811,7 @@ int operationCount private void WriteKey(Policy policy, Key key, bool sendDeadline) { WriteKey(key); - WriteTran(policy.Txn, sendDeadline); + WriteTxn(policy.Txn, sendDeadline); if (policy.sendKey) { @@ -2847,24 +2921,24 @@ private void 
WriteOperation(Operation.Type operationType) dataBuffer[dataOffset++] = 0; } - private int SizeTran(Key key, Txn tran, bool sendDeadline) + private int SizeTxn(Key key, Txn txn, bool sendDeadline) { int fieldCount = 0; - if (tran != null) + if (txn != null) { dataOffset += 8 + FIELD_HEADER_SIZE; fieldCount++; - Version = tran.GetReadVersion(key); + Version = txn.GetReadVersion(key); - if (Version != null) + if (Version.HasValue) { dataOffset += 7 + FIELD_HEADER_SIZE; fieldCount++; } - if (sendDeadline && tran.Deadline != 0) + if (sendDeadline && txn.Deadline != 0) { dataOffset += 4 + FIELD_HEADER_SIZE; fieldCount++; @@ -2873,20 +2947,20 @@ private int SizeTran(Key key, Txn tran, bool sendDeadline) return fieldCount; } - private void WriteTran(Txn tran, bool sendDeadline) + private void WriteTxn(Txn txn, bool sendDeadline) { - if (tran != null) + if (txn != null) { - WriteFieldLE(tran.Id, FieldType.MRT_ID); + WriteFieldLE(txn.Id, FieldType.MRT_ID); if (Version.HasValue) { WriteFieldVersion(Version.Value); } - if (sendDeadline && tran.Deadline != 0) + if (sendDeadline && txn.Deadline != 0) { - WriteFieldLE(tran.Deadline, FieldType.MRT_DEADLINE); + WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE); } } } diff --git a/AerospikeClient/Command/DeleteCommand.cs b/AerospikeClient/Command/DeleteCommand.cs index 2d87cf6e..fc871ec5 100644 --- a/AerospikeClient/Command/DeleteCommand.cs +++ b/AerospikeClient/Command/DeleteCommand.cs @@ -26,7 +26,7 @@ public sealed class DeleteCommand : SyncWriteCommand public DeleteCommand(Cluster cluster, WritePolicy writePolicy, Key key) : base(cluster, writePolicy, key) { - cluster.AddCommand(); + cluster.AddCommandCount(); } protected internal override void WriteBuffer() diff --git a/AerospikeClient/Command/ExecuteCommand.cs b/AerospikeClient/Command/ExecuteCommand.cs index 0c7a7bb4..e9898693 100644 --- a/AerospikeClient/Command/ExecuteCommand.cs +++ b/AerospikeClient/Command/ExecuteCommand.cs @@ -64,11 +64,6 @@ protected internal override 
void ParseResult(IConnection conn) return; } - if (opCount > 0) - { - throw new AerospikeException("Unexpected UDF opCount on error: " + opCount + ',' + resultCode); - } - if (resultCode == ResultCode.FILTERED_OUT) { if (policy.failOnFilteredOut) diff --git a/AerospikeClient/Command/OperateArgs.cs b/AerospikeClient/Command/OperateArgs.cs index 21ce2e4e..8f983036 100644 --- a/AerospikeClient/Command/OperateArgs.cs +++ b/AerospikeClient/Command/OperateArgs.cs @@ -130,17 +130,5 @@ Operation[] operations } writeAttr = wattr; } - - public Partition GetPartition(Cluster cluster, Key key) - { - if (hasWrite) - { - return Partition.Write(cluster, writePolicy, key); - } - else - { - return Partition.Read(cluster, writePolicy, key); - } - } } } diff --git a/AerospikeClient/Command/OperateCommandRead.cs b/AerospikeClient/Command/OperateCommandRead.cs index 9e15efb1..f20f2ec7 100644 --- a/AerospikeClient/Command/OperateCommandRead.cs +++ b/AerospikeClient/Command/OperateCommandRead.cs @@ -22,7 +22,7 @@ public sealed class OperateCommandRead : ReadCommand private readonly OperateArgs args; public OperateCommandRead(Cluster cluster, Key key, OperateArgs args) - : base(cluster, args.writePolicy, key, args.GetPartition(cluster, key), true) + : base(cluster, args.writePolicy, key, true) { this.args = args; } diff --git a/AerospikeClient/Command/OperateCommandWrite.cs b/AerospikeClient/Command/OperateCommandWrite.cs index a7144f8d..1d766092 100644 --- a/AerospikeClient/Command/OperateCommandWrite.cs +++ b/AerospikeClient/Command/OperateCommandWrite.cs @@ -45,10 +45,6 @@ protected internal override void ParseResult(IConnection conn) return; } - if (opCount > 0) { - throw new AerospikeException("Unexpected operate opCount on error: " + opCount + ',' + resultCode); - } - if (resultCode == ResultCode.FILTERED_OUT) { if (policy.failOnFilteredOut) diff --git a/AerospikeClient/Command/ReadCommand.cs b/AerospikeClient/Command/ReadCommand.cs index a4b00eb8..c8addc04 100644 --- 
a/AerospikeClient/Command/ReadCommand.cs +++ b/AerospikeClient/Command/ReadCommand.cs @@ -28,7 +28,6 @@ public ReadCommand(Cluster cluster, Policy policy, Key key) { this.binNames = null; this.isOperation = false; - cluster.AddCommand(); } public ReadCommand(Cluster cluster, Policy policy, Key key, String[] binNames) @@ -36,15 +35,13 @@ public ReadCommand(Cluster cluster, Policy policy, Key key, String[] binNames) { this.binNames = binNames; this.isOperation = false; - cluster.AddCommand(); } - public ReadCommand(Cluster cluster, Policy policy, Key key, Partition partition, bool isOperation) + public ReadCommand(Cluster cluster, Policy policy, Key key, bool isOperation) : base(cluster, policy, key) { this.binNames = null; this.isOperation = isOperation; - cluster.AddCommand(); } protected internal override void WriteBuffer() @@ -63,11 +60,6 @@ protected internal override void ParseResult(IConnection conn) return; } - if (opCount > 0) - { - throw new AerospikeException("Unexpected read opCount on error: " + opCount + ',' + resultCode); - } - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { return; diff --git a/AerospikeClient/Command/ScanExecutor.cs b/AerospikeClient/Command/ScanExecutor.cs index d9cbe187..a50db933 100644 --- a/AerospikeClient/Command/ScanExecutor.cs +++ b/AerospikeClient/Command/ScanExecutor.cs @@ -24,7 +24,7 @@ public sealed class ScanExecutor { public static void ScanPartitions(Cluster cluster, ScanPolicy policy, string ns, string setName, string[] binNames, ScanCallback callback, PartitionTracker tracker) { - cluster.AddCommand(); + cluster.AddCommandCount(); while (true) { diff --git a/AerospikeClient/Command/SyncCommand.cs b/AerospikeClient/Command/SyncCommand.cs index 7333516a..4c426e7f 100644 --- a/AerospikeClient/Command/SyncCommand.cs +++ b/AerospikeClient/Command/SyncCommand.cs @@ -413,9 +413,9 @@ protected void ParseHeader(IConnection conn) dataOffset += 2; } - protected void ParseFields(Txn tran, Key key, bool hasWrite) + protected 
void ParseFields(Txn txn, Key key, bool hasWrite) { - if (tran == null) + if (txn == null) { SkipFields(fieldCount); return; @@ -447,15 +447,15 @@ protected void ParseFields(Txn tran, Key key, bool hasWrite) if (hasWrite) { - tran.OnWrite(key, version, resultCode); + txn.OnWrite(key, version, resultCode); } else { - tran.OnRead(key, version); + txn.OnRead(key, version); } } - protected void ParseTranDeadline(Txn txn) + protected void ParseTxnDeadline(Txn txn) { for (int i = 0; i < fieldCount; i++) { diff --git a/AerospikeClient/Command/SyncReadCommand.cs b/AerospikeClient/Command/SyncReadCommand.cs index dd258924..72916b3d 100644 --- a/AerospikeClient/Command/SyncReadCommand.cs +++ b/AerospikeClient/Command/SyncReadCommand.cs @@ -27,7 +27,7 @@ public SyncReadCommand(Cluster cluster, Policy policy, Key key) { this.key = key; this.partition = Partition.Read(cluster, policy, key); - cluster.AddCommand(); + cluster.AddCommandCount(); } protected internal override Node GetNode() diff --git a/AerospikeClient/Command/SyncWriteCommand.cs b/AerospikeClient/Command/SyncWriteCommand.cs index dd5cb31a..7cb46485 100644 --- a/AerospikeClient/Command/SyncWriteCommand.cs +++ b/AerospikeClient/Command/SyncWriteCommand.cs @@ -31,7 +31,7 @@ public SyncWriteCommand(Cluster cluster, WritePolicy writePolicy, Key key) this.writePolicy = writePolicy; this.key = key; this.partition = Partition.Write(cluster, writePolicy, key); - cluster.AddCommand(); + cluster.AddCommandCount(); } protected internal override bool IsWrite() diff --git a/AerospikeClient/Command/TouchCommand.cs b/AerospikeClient/Command/TouchCommand.cs index d2206286..d90a4632 100644 --- a/AerospikeClient/Command/TouchCommand.cs +++ b/AerospikeClient/Command/TouchCommand.cs @@ -22,7 +22,6 @@ public sealed class TouchCommand : SyncWriteCommand public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key) : base(cluster, writePolicy, key) { - cluster.AddCommand(); } protected internal override void WriteBuffer() diff 
--git a/AerospikeClient/Command/TxnAddKeys.cs b/AerospikeClient/Command/TxnAddKeys.cs index 85846e6c..e28e994f 100644 --- a/AerospikeClient/Command/TxnAddKeys.cs +++ b/AerospikeClient/Command/TxnAddKeys.cs @@ -35,7 +35,7 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(IConnection conn) { ParseHeader(conn); - ParseTranDeadline(policy.Txn); + ParseTxnDeadline(policy.Txn); if (resultCode == ResultCode.OK) { diff --git a/AerospikeClient/Command/TxnClose.cs b/AerospikeClient/Command/TxnClose.cs index 2c8319b9..6b3fe880 100644 --- a/AerospikeClient/Command/TxnClose.cs +++ b/AerospikeClient/Command/TxnClose.cs @@ -19,17 +19,17 @@ namespace Aerospike.Client { public sealed class TxnClose : SyncWriteCommand { - private readonly Txn tran; + private readonly Txn txn; - public TxnClose(Cluster cluster, Txn tran, WritePolicy writePolicy, Key key) + public TxnClose(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key) : base(cluster, writePolicy, key) { - this.tran = tran; + this.txn = txn; } protected internal override void WriteBuffer() { - SetTxnClose(tran, key); + SetTxnClose(txn, key); } protected internal override void ParseResult(IConnection conn) diff --git a/AerospikeClient/Command/TxnMarkRollForward.cs b/AerospikeClient/Command/TxnMarkRollForward.cs index 838ce457..e478f9f4 100644 --- a/AerospikeClient/Command/TxnMarkRollForward.cs +++ b/AerospikeClient/Command/TxnMarkRollForward.cs @@ -19,17 +19,17 @@ namespace Aerospike.Client { public sealed class TxnMarkRollForward : SyncWriteCommand { - private readonly Txn tran; + private readonly Txn txn; - public TxnMarkRollForward(Cluster cluster, Txn tran, WritePolicy writePolicy, Key key) + public TxnMarkRollForward(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key) : base(cluster, writePolicy, key) { - this.tran = tran; + this.txn = txn; } protected internal override void WriteBuffer() { - SetTxnMarkRollForward(tran, key); + SetTxnMarkRollForward(txn, key); } 
protected internal override void ParseResult(IConnection conn) diff --git a/AerospikeClient/Command/TxnMonitor.cs b/AerospikeClient/Command/TxnMonitor.cs index c20873f3..7d5aabde 100644 --- a/AerospikeClient/Command/TxnMonitor.cs +++ b/AerospikeClient/Command/TxnMonitor.cs @@ -20,7 +20,7 @@ namespace Aerospike.Client public sealed class TxnMonitor { private static readonly ListPolicy OrderedListPolicy = new(ListOrder.ORDERED, - ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL); + ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL); private static readonly string BinNameId = "id"; private static readonly string BinNameDigests = "keyds"; @@ -35,19 +35,19 @@ public static void AddKey(Cluster cluster, WritePolicy policy, Key cmdKey) return; } - Operation[] ops = GetTranOps(txn, cmdKey); + Operation[] ops = GetTxnOps(txn, cmdKey); AddWriteKeys(cluster, policy, ops); } public static void AddKeys(Cluster cluster, BatchPolicy policy, Key[] keys) { - Operation[] ops = GetTranOps(policy.Txn, keys); + Operation[] ops = GetTxnOps(policy.Txn, keys); AddWriteKeys(cluster, policy, ops); } public static void AddKeys(Cluster cluster, BatchPolicy policy, List records) { - Operation[] ops = GetTranOps(policy.Txn, records); + Operation[] ops = GetTxnOps(policy.Txn, records); if (ops != null) { @@ -55,44 +55,44 @@ public static void AddKeys(Cluster cluster, BatchPolicy policy, List list = new(keys.Length); foreach (Key key in keys) { - tran.SetNamespace(key.ns); + txn.SetNamespace(key.ns); list.Add(Value.Get(key.digest)); } - return GetTranOps(tran, list); + return GetTxnOps(txn, list); } - public static Operation[] GetTranOps(Txn tran, List records) + public static Operation[] GetTxnOps(Txn txn, List records) { List list = new(records.Count); foreach (BatchRecord br in records) { - tran.SetNamespace(br.key.ns); + txn.SetNamespace(br.key.ns); if (br.hasWrite) { @@ -105,22 +105,22 @@ public static Operation[] GetTranOps(Txn tran, List 
records) // Readonly batch does not need to add key digests. return null; } - return GetTranOps(tran, list); + return GetTxnOps(txn, list); } - private static Operation[] GetTranOps(Txn tran, List list) + private static Operation[] GetTxnOps(Txn txn, List list) { - if (tran.Deadline == 0) + if (txn.MonitorExists()) { // No existing monitor record. return new Operation[] { - Operation.Put(new Bin(BinNameId, tran.Id)), ListOperation.AppendItems(OrderedListPolicy, BinNameDigests, list) }; } else { return new Operation[] { + Operation.Put(new Bin(BinNameId, txn.Id)), ListOperation.AppendItems(OrderedListPolicy, BinNameDigests, list) }; } @@ -128,16 +128,16 @@ private static Operation[] GetTranOps(Txn tran, List list) private static void AddWriteKeys(Cluster cluster, Policy policy, Operation[] ops) { - Key tranKey = GetTxnMonitorKey(policy.Txn); + Key txnKey = GetTxnMonitorKey(policy.Txn); WritePolicy wp = CopyTimeoutPolicy(policy); OperateArgs args = new(wp, null, null, ops); - TxnAddKeys cmd = new(cluster, tranKey, args); + TxnAddKeys cmd = new(cluster, txnKey, args); cmd.Execute(); } - public static Key GetTxnMonitorKey(Txn tran) + public static Key GetTxnMonitorKey(Txn txn) { - return new Key(tran.Ns, " keySet = txn.Writes; - if (keySet.Count != 0) + if (txn.MonitorExists()) { // Tell MRT monitor that a roll-forward will commence. try @@ -101,11 +100,11 @@ public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) } catch (Exception t) { - throw new AerospikeException.Commit(CommitErrorType.ROLL_FORWARD_ABANDONED, verifyRecords, rollRecords, t); + return CommitStatusType.ROLL_FORWARD_ABANDONED; } } - if (txn.Deadline != 0) + if (txn.MonitorMightExist()) { // Remove MRT monitor. 
try @@ -123,21 +122,16 @@ public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) public AbortStatusType Abort(BatchPolicy rollPolicy) { - HashSet keySet = txn.Writes; - - if (keySet.Count != 0) + try { - try - { - Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); - } - catch (Exception) - { - return AbortStatusType.ROLL_BACK_ABANDONED; - } + Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); + } + catch (Exception) + { + return AbortStatusType.ROLL_BACK_ABANDONED; } - if (txn.Deadline != 0) + if (txn.MonitorMightExist()) { try { @@ -224,18 +218,18 @@ private void Roll(BatchPolicy rollPolicy, int txnAttr) this.rollRecords = records; - // Copy txn roll policy because it needs to be modified. + // Copy transaction roll policy because it needs to be modified. BatchPolicy batchPolicy = new(rollPolicy); BatchAttr attr = new(); attr.SetTxn(txnAttr); BatchStatus status = new(true); - // generate() requires a null txn instance. + // generate() requires a null transaction instance. List bns = BatchNode.GenerateList(cluster, batchPolicy, keys, records, true, status); BatchCommand[] commands = new BatchCommand[bns.Count]; - // Batch roll forward requires the txn instance. + // Batch roll forward requires the transaction instance. 
batchPolicy.Txn = txn; int count = 0; diff --git a/AerospikeClient/Command/WriteCommand.cs b/AerospikeClient/Command/WriteCommand.cs index 4363427b..3786edb5 100644 --- a/AerospikeClient/Command/WriteCommand.cs +++ b/AerospikeClient/Command/WriteCommand.cs @@ -27,7 +27,6 @@ public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bin { this.bins = bins; this.operation = operation; - cluster.AddCommand(); } protected internal override void WriteBuffer() diff --git a/AerospikeClient/Listener/AbortListener.cs b/AerospikeClient/Listener/AbortListener.cs index 4e7f29b5..b5bb878b 100644 --- a/AerospikeClient/Listener/AbortListener.cs +++ b/AerospikeClient/Listener/AbortListener.cs @@ -24,14 +24,8 @@ namespace Aerospike.Client public interface AbortListener { /// - /// This method is called when the abort succeeds. + /// This method is called when the abort succeeded or will succeed. /// void OnSuccess(AbortStatusType status); - - /// - /// This method is called when the abort fails. - /// - /// error that occurred - void OnFailure(AerospikeException exception); } } diff --git a/AerospikeClient/Listener/CommitListener.cs b/AerospikeClient/Listener/CommitListener.cs index 6e32a04d..fdca6e2b 100644 --- a/AerospikeClient/Listener/CommitListener.cs +++ b/AerospikeClient/Listener/CommitListener.cs @@ -32,6 +32,6 @@ public interface CommitListener /// This method is called when the commit fails. 
/// /// error that occurred - void OnFailure(AerospikeException exception); + void OnFailure(AerospikeException.Commit exception); } } diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index d656a1bf..64ad53fd 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -230,6 +230,7 @@ protected internal AerospikeClient(ClientPolicy policy) this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; this.txnRollPolicyDefault = policy.txnRollPolicyDefault; this.infoPolicyDefault = policy.infoPolicyDefault; + this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); } else { @@ -237,7 +238,7 @@ protected internal AerospikeClient(ClientPolicy policy) this.writePolicyDefault = new WritePolicy(); this.scanPolicyDefault = new ScanPolicy(); this.queryPolicyDefault = new QueryPolicy(); - this.batchPolicyDefault = BatchPolicy.ReadDefault(); + this.batchPolicyDefault = new BatchPolicy(); this.batchParentPolicyWriteDefault = BatchPolicy.WriteDefault(); this.batchWritePolicyDefault = new BatchWritePolicy(); this.batchDeletePolicyDefault = new BatchDeletePolicy(); @@ -465,7 +466,7 @@ public ClusterStats GetClusterStats() /// /// Attempt to commit the given multi-record transaction. First, the expected record versions are /// sent to the server nodes for verification. If all nodes return success, the transaction is - /// committed.Otherwise, the transaction is aborted. + /// committed. Otherwise, the transaction is aborted. ///

/// Requires server version 8.0+ ///

@@ -478,8 +479,8 @@ public CommitStatus.CommitStatusType Commit(Txn txn) return CommitStatus.CommitStatusType.ALREADY_ATTEMPTED; } - TxnRoll tm = new TxnRoll(cluster, txn); - return tm.Commit(txnVerifyPolicyDefault, txnRollPolicyDefault); + TxnRoll tr = new(cluster, txn); + return tr.Commit(txnVerifyPolicyDefault, txnRollPolicyDefault); } /// @@ -496,8 +497,8 @@ public AbortStatus.AbortStatusType Abort(Txn txn) return AbortStatus.AbortStatusType.ALREADY_ATTEMPTED; } - TxnRoll tm = new TxnRoll(cluster, txn); - return tm.Abort(txnRollPolicyDefault); + TxnRoll tr = new(cluster, txn); + return tr.Abort(txnRollPolicyDefault); } //------------------------------------------------------- @@ -1839,7 +1840,7 @@ public ExecuteTask Execute(WritePolicy policy, Statement statement, string packa statement.FunctionName = functionName; statement.FunctionArgs = functionArgs; - cluster.AddCommand(); + cluster.AddCommandCount(); ulong taskId = statement.PrepareTaskId(); Node[] nodes = cluster.ValidateNodes(); @@ -1878,7 +1879,7 @@ public ExecuteTask Execute(WritePolicy policy, Statement statement, params Opera statement.Operations = operations; } - cluster.AddCommand(); + cluster.AddCommandCount(); ulong taskId = statement.PrepareTaskId(); Node[] nodes = cluster.ValidateNodes(); diff --git a/AerospikeClient/Main/AerospikeException.cs b/AerospikeClient/Main/AerospikeException.cs index 312fd4ae..62d7898f 100644 --- a/AerospikeClient/Main/AerospikeException.cs +++ b/AerospikeClient/Main/AerospikeException.cs @@ -625,7 +625,7 @@ public sealed class Commit : AerospikeException public readonly BatchRecord[] RollRecords; public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords) - : base(ResultCode.TRAN_FAILED, CommitErrorToString(error)) + : base(ResultCode.TXN_FAILED, CommitErrorToString(error)) { this.Error = error; this.VerifyRecords = verifyRecords; @@ -633,7 +633,7 @@ public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] } 
public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords, Exception cause) - : base(ResultCode.TRAN_FAILED, CommitErrorToString(error), cause) + : base(ResultCode.TXN_FAILED, CommitErrorToString(error), cause) { this.Error = error; this.VerifyRecords = verifyRecords; @@ -655,49 +655,6 @@ public override string Message } } - /// - /// Exception thrown when {@link AerospikeClient#abort(com.aerospike.client.Txn)} fails. - /// - public sealed class Abort : AerospikeException - { - /// - /// Error status of the attempted abort. - /// - public readonly AbortStatusType Error; - - /// - /// Roll backward result for each write key in the MRT. May be null if failure occurred before roll backward. - /// - public readonly BatchRecord[] RollRecords; - - public Abort(AbortStatusType error, BatchRecord[] rollRecords) - : base(ResultCode.TRAN_FAILED, AbortErrorToString(error)) - { - this.Error = error; - this.RollRecords = rollRecords; - } - - public Abort(AbortStatusType error, BatchRecord[] rollRecords, Exception cause) - : base(ResultCode.TRAN_FAILED, AbortErrorToString(error), cause) - { - this.Error = error; - this.RollRecords = rollRecords; - } - - /// - /// Get Commit message with records. 
- /// - public override string Message - { - get - { - StringBuilder sb = new(1024); - RecordsToString(sb, "roll errors:", RollRecords); - return BaseMessage + sb.ToString(); - } - } - } - private static void RecordsToString(StringBuilder sb, String title, BatchRecord[] records) { if (records == null) diff --git a/AerospikeClient/Main/CommitError.cs b/AerospikeClient/Main/CommitError.cs index 6a166eb5..8fc6fb8b 100644 --- a/AerospikeClient/Main/CommitError.cs +++ b/AerospikeClient/Main/CommitError.cs @@ -27,9 +27,7 @@ public enum CommitErrorType VERIFY_FAIL, VERIFY_FAIL_CLOSE_ABANDONED, VERIFY_FAIL_ABORT_ABANDONED, - MARK_ROLL_FORWARD_ABANDONED, - ROLL_FORWARD_ABANDONED, - CLOSE_ABANDONED + MARK_ROLL_FORWARD_ABANDONED } public static string CommitErrorToString(CommitErrorType type) @@ -40,8 +38,6 @@ public static string CommitErrorToString(CommitErrorType type) CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED => "MRT verify failed. MRT aborted. MRT client close abandoned. Server will eventually close the MRT.", CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED => "MRT verify failed. MRT client abort abandoned. Server will eventually abort the MRT.", CommitErrorType.MARK_ROLL_FORWARD_ABANDONED => "MRT client mark roll forward abandoned. Server will eventually abort the MRT.", - CommitErrorType.ROLL_FORWARD_ABANDONED => "MRT client roll forward abandoned. Server will eventually commit the MRT.", - CommitErrorType.CLOSE_ABANDONED => "MRT has been rolled forward, but MRT client close was abandoned. 
Server will eventually close the MRT.", _ => "Unexpected CommitErrorType" }; } diff --git a/AerospikeClient/Main/CommitStatus.cs b/AerospikeClient/Main/CommitStatus.cs index 021b88f7..31ba95ad 100644 --- a/AerospikeClient/Main/CommitStatus.cs +++ b/AerospikeClient/Main/CommitStatus.cs @@ -39,7 +39,7 @@ public static string CommitErrorToString(CommitStatusType status) CommitStatusType.OK => "Commit succeeded.", CommitStatusType.ALREADY_ATTEMPTED => "Commit or abort already attempted.", CommitStatusType.ROLL_FORWARD_ABANDONED => "MRT client roll forward abandoned. Server will eventually commit the MRT.", - CommitStatusType.CLOSE_ABANDONED => "MRT has been rolled back, but MRT client close was abandoned. Server will eventually close the MRT.", + CommitStatusType.CLOSE_ABANDONED => "MRT has been rolled forward, but MRT client close was abandoned. Server will eventually close the MRT.", _ => "Unexpected AbortStatusType." }; } diff --git a/AerospikeClient/Main/IAerospikeClient.cs b/AerospikeClient/Main/IAerospikeClient.cs index 5adfca00..c9c6ee2f 100644 --- a/AerospikeClient/Main/IAerospikeClient.cs +++ b/AerospikeClient/Main/IAerospikeClient.cs @@ -135,7 +135,7 @@ public interface IAerospikeClient /// /// Attempt to commit the given multi-record transaction. First, the expected record versions are /// sent to the server nodes for verification.If all nodes return success, the command is - /// committed.Otherwise, the command is aborted. + /// committed. Otherwise, the transaction is aborted. ///

/// Requires server version 8.0+ ///

diff --git a/AerospikeClient/Main/Key.cs b/AerospikeClient/Main/Key.cs index f929545b..944079fc 100644 --- a/AerospikeClient/Main/Key.cs +++ b/AerospikeClient/Main/Key.cs @@ -369,6 +369,7 @@ public Key(string ns, byte[] digest, string setName, Value userKey) this.ns = ns; this.digest = digest; this.setName = setName; + // Do not try to validate userKey type because it is most likely null. this.userKey = userKey; } diff --git a/AerospikeClient/Main/ResultCode.cs b/AerospikeClient/Main/ResultCode.cs index edd6e470..ce1159fe 100644 --- a/AerospikeClient/Main/ResultCode.cs +++ b/AerospikeClient/Main/ResultCode.cs @@ -25,7 +25,7 @@ public sealed class ResultCode /// Multi-record transaction failed. /// Value: -17 ///
- public const int TRAN_FAILED = -17; + public const int TXN_FAILED = -17; /// /// One or more keys failed in a batch. @@ -566,7 +566,7 @@ public static string GetResultString(int resultCode) { switch (resultCode) { - case TRAN_FAILED: + case TXN_FAILED: return "Multi-record transaction failed"; case BATCH_FAILED: @@ -693,13 +693,13 @@ public static string GetResultString(int resultCode) return "Command filtered out"; case LOST_CONFLICT: - return "Transaction failed due to conflict with XDR"; + return "Command failed due to conflict with XDR"; case MRT_BLOCKED: return "MRT record blocked by a different transaction"; case MRT_CONFLICT: - return "MRT verify failed"; + return "MRT read verify failed"; case MRT_EXPIRED: return "MRT expired"; diff --git a/AerospikeClient/Metrics/LatencyBuckets.cs b/AerospikeClient/Metrics/LatencyBuckets.cs index 392ee518..d5eeee52 100644 --- a/AerospikeClient/Metrics/LatencyBuckets.cs +++ b/AerospikeClient/Metrics/LatencyBuckets.cs @@ -18,7 +18,7 @@ namespace Aerospike.Client { /// - /// Latency buckets for a transaction group (See {@link com.aerospike.client.metrics.LatencyType}). + /// Latency buckets for a command group (See {@link com.aerospike.client.metrics.LatencyType}). /// Latency bucket counts are cumulative and not reset on each metrics snapshot interval. /// public sealed class LatencyBuckets diff --git a/AerospikeClient/Policy/BatchDeletePolicy.cs b/AerospikeClient/Policy/BatchDeletePolicy.cs index 6425a1d9..9d99775a 100644 --- a/AerospikeClient/Policy/BatchDeletePolicy.cs +++ b/AerospikeClient/Policy/BatchDeletePolicy.cs @@ -37,7 +37,7 @@ public sealed class BatchDeletePolicy public Expression filterExp; /// - /// Desired consistency guarantee when committing a transaction on the server. The default + /// Desired consistency guarantee when committing a command on the server. 
The default /// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to /// be successful before returning success to the client. /// @@ -64,7 +64,7 @@ public sealed class BatchDeletePolicy public int generation; /// - /// If the transaction results in a record deletion, leave a tombstone for the record. + /// If the command results in a record deletion, leave a tombstone for the record. /// This prevents deleted records from reappearing after node failures. /// Valid for Aerospike Server Enterprise Edition only. /// diff --git a/AerospikeClient/Policy/BatchPolicy.cs b/AerospikeClient/Policy/BatchPolicy.cs index 0e040327..79a039f9 100644 --- a/AerospikeClient/Policy/BatchPolicy.cs +++ b/AerospikeClient/Policy/BatchPolicy.cs @@ -44,7 +44,7 @@ public class BatchPolicy : Policy ///
    ///
  • /// 1 (default): Issue batch node requests sequentially. This mode has a performance advantage - /// for small batch sizes because requests can be issued in the main transaction thread without + /// for small batch sizes because requests can be issued in the main command thread without /// using a thread pool. This mode is not optimal for batch requests spread out over many nodes /// in a large cluster. ///
  • @@ -71,7 +71,7 @@ public class BatchPolicy : Policy /// Allow batch to be processed immediately in the server's receiving thread for in-memory /// namespaces. If false, the batch will always be processed in separate service threads. /// - /// For batch transactions with smaller sized records (<= 1K per record), inline + /// For batch commands with smaller sized records (<= 1K per record), inline /// processing will be significantly faster on in-memory namespaces. /// /// diff --git a/AerospikeClient/Policy/BatchUDFPolicy.cs b/AerospikeClient/Policy/BatchUDFPolicy.cs index 35f79358..600cfc13 100644 --- a/AerospikeClient/Policy/BatchUDFPolicy.cs +++ b/AerospikeClient/Policy/BatchUDFPolicy.cs @@ -37,7 +37,7 @@ public sealed class BatchUDFPolicy public Expression filterExp; /// - /// Desired consistency guarantee when committing a transaction on the server. The default + /// Desired consistency guarantee when committing a command on the server. The default /// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to /// be successful before returning success to the client. /// @@ -63,7 +63,7 @@ public sealed class BatchUDFPolicy public int expiration; /// - /// If the transaction results in a record deletion, leave a tombstone for the record. + /// If the command results in a record deletion, leave a tombstone for the record. /// This prevents deleted records from reappearing after node failures. /// Valid for Aerospike Server Enterprise Edition only. /// diff --git a/AerospikeClient/Policy/BatchWritePolicy.cs b/AerospikeClient/Policy/BatchWritePolicy.cs index e4d77e6e..7618cc04 100644 --- a/AerospikeClient/Policy/BatchWritePolicy.cs +++ b/AerospikeClient/Policy/BatchWritePolicy.cs @@ -45,7 +45,7 @@ public sealed class BatchWritePolicy public RecordExistsAction recordExistsAction = RecordExistsAction.UPDATE; /// - /// Desired consistency guarantee when committing a transaction on the server. 
The default + /// Desired consistency guarantee when committing a command on the server. The default /// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to /// be successful before returning success to the client. /// @@ -88,7 +88,7 @@ public sealed class BatchWritePolicy public int expiration; /// - /// If the transaction results in a record deletion, leave a tombstone for the record. + /// If the command results in a record deletion, leave a tombstone for the record. /// This prevents deleted records from reappearing after node failures. /// Valid for Aerospike Server Enterprise Edition only. /// diff --git a/AerospikeClient/Policy/ClientPolicy.cs b/AerospikeClient/Policy/ClientPolicy.cs index 2e9c5edd..d3880cff 100644 --- a/AerospikeClient/Policy/ClientPolicy.cs +++ b/AerospikeClient/Policy/ClientPolicy.cs @@ -79,7 +79,7 @@ public class ClientPolicy public int minConnsPerNode; /// - /// Maximum number of synchronous connections allowed per server node. Transactions will go + /// Maximum number of synchronous connections allowed per server node. Commands will go /// through retry logic and potentially fail with "ResultCode.NO_MORE_CONNECTIONS" if the maximum /// number of connections would be exceeded. /// @@ -120,7 +120,7 @@ public class ClientPolicy /// /// /// If server's proto-fd-idle-ms is zero (no reap), then maxSocketIdle should also be zero. - /// Connections retrieved from a pool in transactions will not be checked for maxSocketIdle + /// Connections retrieved from a pool in commands will not be checked for maxSocketIdle /// when maxSocketIdle is zero. Idle connections will still be trimmed down from peak /// connections to min connections (minConnsPerNode and asyncMinConnsPerNode) using a /// hard-coded 55 second limit in the cluster tend thread. 
diff --git a/AerospikeClient/Policy/CommitLevel.cs b/AerospikeClient/Policy/CommitLevel.cs index 1be4761f..aa4f5240 100644 --- a/AerospikeClient/Policy/CommitLevel.cs +++ b/AerospikeClient/Policy/CommitLevel.cs @@ -17,7 +17,7 @@ namespace Aerospike.Client { /// - /// Desired consistency guarantee when committing a transaction on the server. + /// Desired consistency guarantee when committing a command on the server. /// public enum CommitLevel { diff --git a/AerospikeClient/Policy/Policy.cs b/AerospikeClient/Policy/Policy.cs index cd1962e9..68c11f0b 100644 --- a/AerospikeClient/Policy/Policy.cs +++ b/AerospikeClient/Policy/Policy.cs @@ -21,12 +21,13 @@ namespace Aerospike.Client { /// - /// Transaction policy attributes used in all database commands. + /// Command policy attributes used in all database commands. /// public class Policy { /// - /// Multi-record transaction identifier. + /// Multi-record transaction identifier (MRT). If this field is populated, the corresponding + /// command will be included in the MRT. This field is ignored for scan/query. /// /// Default: null /// @@ -60,7 +61,7 @@ public class Policy /// /// Optional expression filter. If filterExp exists and evaluates to false, the - /// transaction is ignored. + /// command is ignored. /// /// Default: null /// @@ -84,7 +85,7 @@ public class Policy /// /// If socketTimeout is not zero and the socket has been idle for at least socketTimeout, /// both maxRetries and totalTimeout are checked. If maxRetries and totalTimeout are not - /// exceeded, the transaction is retried. + /// exceeded, the command is retried. /// /// /// For synchronous methods, socketTimeout is the socket SendTimeout and ReceiveTimeout. @@ -98,15 +99,15 @@ public class Policy public int socketTimeout = 30000; /// - /// Total transaction timeout in milliseconds. + /// Total command timeout in milliseconds. 
/// /// The totalTimeout is tracked on the client and sent to the server along with - /// the transaction in the wire protocol. The client will most likely timeout - /// first, but the server also has the capability to timeout the transaction. + /// the command in the wire protocol. The client will most likely timeout + /// first, but the server also has the capability to timeout the command. /// /// - /// If totalTimeout is not zero and totalTimeout is reached before the transaction - /// completes, the transaction will abort with + /// If totalTimeout is not zero and totalTimeout is reached before the command + /// completes, the command will abort with /// . /// /// @@ -119,11 +120,11 @@ public class Policy /// /// Delay milliseconds after socket read timeout in an attempt to recover the socket - /// in the background. Processing continues on the original transaction and the user - /// is still notified at the original transaction timeout. + /// in the background. Processing continues on the original command and the user + /// is still notified at the original command timeout. /// - /// When a transaction is stopped prematurely, the socket must be drained of all incoming - /// data or closed to prevent unread socket data from corrupting the next transaction + /// When a command is stopped prematurely, the socket must be drained of all incoming + /// data or closed to prevent unread socket data from corrupting the next command /// that would use that socket. /// /// @@ -144,7 +145,7 @@ public class Policy /// /// /// The disadvantage of enabling timeoutDelay is that extra memory/processing is required - /// to drain sockets and additional connections may still be needed for transaction retries. + /// to drain sockets and additional connections may still be needed for command retries. /// /// /// If timeoutDelay were to be enabled, 3000ms would be a reasonable value. 
@@ -156,16 +157,16 @@ public class Policy public int TimeoutDelay = 0; /// - /// Maximum number of retries before aborting the current transaction. + /// Maximum number of retries before aborting the current command. /// The initial attempt is not counted as a retry. /// - /// If maxRetries is exceeded, the transaction will abort with + /// If maxRetries is exceeded, the command will abort with /// . /// /// /// WARNING: Database writes that are not idempotent (such as Add()) /// should not be retried because the write operation may be performed - /// multiple times if the client timed out previous transaction attempts. + /// multiple times if the client timed out previous command attempts. /// It's important to use a distinct WritePolicy for non-idempotent /// writes which sets maxRetries = 0; /// @@ -209,7 +210,7 @@ public class Policy /// Determine how record TTL (time to live) is affected on reads. When enabled, the server can /// efficiently operate as a read-based LRU cache where the least recently used records are expired. /// The value is expressed as a percentage of the TTL sent on the most recent write such that a read - /// within this interval of the record’s end of life will generate a touch. + /// within this interval of the record�s end of life will generate a touch. /// /// For example, if the most recent write had a TTL of 10 hours and read_touch_ttl_percent is set to /// 80, the next read within 8 hours of the record's end of life (equivalent to 2 hours after the most @@ -259,7 +260,7 @@ public class Policy /// /// Throw exception if is defined and that filter evaluates - /// to false (transaction ignored). The + /// to false (command ignored). The /// will contain result code . /// /// This field is not applicable to batch, scan or query commands. 
diff --git a/AerospikeClient/Policy/QueryPolicy.cs b/AerospikeClient/Policy/QueryPolicy.cs index 49f39f6f..501876f9 100644 --- a/AerospikeClient/Policy/QueryPolicy.cs +++ b/AerospikeClient/Policy/QueryPolicy.cs @@ -21,6 +21,10 @@ namespace Aerospike.Client { /// /// Container object for policy attributes used in query operations. + /// + /// Inherited Policy fields and + /// are ignored. + /// /// public class QueryPolicy : Policy { diff --git a/AerospikeClient/Policy/ScanPolicy.cs b/AerospikeClient/Policy/ScanPolicy.cs index 4684524b..fba0b129 100644 --- a/AerospikeClient/Policy/ScanPolicy.cs +++ b/AerospikeClient/Policy/ScanPolicy.cs @@ -21,6 +21,10 @@ namespace Aerospike.Client { /// /// Container object for optional parameters used in scan operations. + /// + /// Inherited Policy fields and + /// are ignored. + /// /// public sealed class ScanPolicy : Policy { diff --git a/AerospikeClient/Policy/WritePolicy.cs b/AerospikeClient/Policy/WritePolicy.cs index 3fd1247e..ce18b23c 100644 --- a/AerospikeClient/Policy/WritePolicy.cs +++ b/AerospikeClient/Policy/WritePolicy.cs @@ -40,7 +40,7 @@ public sealed class WritePolicy : Policy public GenerationPolicy generationPolicy = GenerationPolicy.NONE; /// - /// Desired consistency guarantee when committing a transaction on the server. The default + /// Desired consistency guarantee when committing a command on the server. The default /// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to /// be successful before returning success to the client. /// Default: CommitLevel.COMMIT_ALL @@ -93,7 +93,7 @@ public sealed class WritePolicy : Policy public bool respondAllOps; /// - /// If the transaction results in a record deletion, leave a tombstone for the record. + /// If the command results in a record deletion, leave a tombstone for the record. /// This prevents deleted records from reappearing after node failures. /// Valid for Aerospike Server Enterprise Edition 3.10+ only. 
/// Default: false (do not tombstone deleted records). diff --git a/AerospikeClient/Query/QueryExecutor.cs b/AerospikeClient/Query/QueryExecutor.cs index 0fc72986..4ad3e026 100644 --- a/AerospikeClient/Query/QueryExecutor.cs +++ b/AerospikeClient/Query/QueryExecutor.cs @@ -45,7 +45,7 @@ public QueryExecutor(Cluster cluster, QueryPolicy policy, Statement statement, N // Initialize maximum number of nodes to query in parallel. this.maxConcurrentNodes = (policy.maxConcurrentNodes == 0 || policy.maxConcurrentNodes >= threads.Length) ? threads.Length : policy.maxConcurrentNodes; - cluster.AddCommand(); + cluster.AddCommandCount(); } protected internal void InitializeThreads() diff --git a/AerospikeClient/Query/QueryListenerExecutor.cs b/AerospikeClient/Query/QueryListenerExecutor.cs index b215e835..bdc6f6c4 100644 --- a/AerospikeClient/Query/QueryListenerExecutor.cs +++ b/AerospikeClient/Query/QueryListenerExecutor.cs @@ -29,7 +29,7 @@ public static void execute PartitionTracker tracker ) { - cluster.AddCommand(); + cluster.AddCommandCount(); ulong taskId = statement.PrepareTaskId(); diff --git a/AerospikeClient/Query/QueryPartitionExecutor.cs b/AerospikeClient/Query/QueryPartitionExecutor.cs index f970055b..4bd5b6d8 100644 --- a/AerospikeClient/Query/QueryPartitionExecutor.cs +++ b/AerospikeClient/Query/QueryPartitionExecutor.cs @@ -51,7 +51,7 @@ PartitionTracker tracker this.cancel = new CancellationTokenSource(); this.tracker = tracker; this.recordSet = new RecordSet(this, policy.recordQueueSize, cancel.Token); - cluster.AddCommand(); + cluster.AddCommandCount(); ThreadPool.UnsafeQueueUserWorkItem(this.Run, null); } diff --git a/AerospikeClient/Query/RecordSet.cs b/AerospikeClient/Query/RecordSet.cs index 626df355..26a172ae 100644 --- a/AerospikeClient/Query/RecordSet.cs +++ b/AerospikeClient/Query/RecordSet.cs @@ -198,7 +198,7 @@ internal void Abort() { valid = false; - // Send end command to transaction thread. + // Send end command to command thread. 
// It's critical that the end token add succeeds. while (!queue.TryAdd(END)) { diff --git a/AerospikeClient/Query/ResultSet.cs b/AerospikeClient/Query/ResultSet.cs index 9f5b7f1b..a80481db 100644 --- a/AerospikeClient/Query/ResultSet.cs +++ b/AerospikeClient/Query/ResultSet.cs @@ -189,7 +189,7 @@ internal void Abort() { valid = false; - // Send end command to transaction thread. + // Send end command to command thread. // It's critical that the end token add succeeds. while (!queue.TryAdd(END)) { diff --git a/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs b/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs index 2275256f..4250c33e 100644 --- a/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs +++ b/AerospikeClientProxy/Proxy/AerospikeClientProxy.cs @@ -462,14 +462,14 @@ private string GetVersion() /// /// Attempt to commit the given multi-record transaction. First, the expected record versions are - /// sent to the server nodes for verification.If all nodes return success, the transaction is - /// committed.Otherwise, the transaction is aborted. + /// sent to the server nodes for verification. If all nodes return success, the transaction is + /// committed. Otherwise, the transaction is aborted. ///

    /// Requires server version 8.0+ ///

    ///
    - /// multi-record transaction - public CommitStatus.CommitStatusType Commit(Txn tran) + /// multi-record transaction + public CommitStatus.CommitStatusType Commit(Txn txn) { return CommitStatus.CommitStatusType.OK; } @@ -480,8 +480,8 @@ public CommitStatus.CommitStatusType Commit(Txn tran) /// Requires server version 8.0+ ///

    ///
    - /// multi-record transaction - public AbortStatus.AbortStatusType Abort(Txn tran) + /// multi-record transaction + public AbortStatus.AbortStatusType Abort(Txn txn) { return AbortStatus.AbortStatusType.OK; } @@ -492,7 +492,7 @@ public AbortStatus.AbortStatusType Abort(Txn tran) /// /// Write record bin(s). - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// write configuration parameters, pass in null for defaults @@ -513,7 +513,7 @@ public void Put(WritePolicy policy, Key key, params Bin[] bins) /// /// Append bin string values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -531,7 +531,7 @@ public void Append(WritePolicy policy, Key key, params Bin[] bins) /// /// Prepend bin string values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -553,7 +553,7 @@ public void Prepend(WritePolicy policy, Key key, params Bin[] bins) /// /// Add integer/double bin values to existing record bin values. - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. 
/// /// write configuration parameters, pass in null for defaults @@ -575,7 +575,7 @@ public void Add(WritePolicy policy, Key key, params Bin[] bins) /// /// Delete record for specified key. /// Return whether record existed on server before deletion. - /// The policy specifies the transaction timeout. + /// The policy specifies the command timeout. /// /// delete configuration parameters, pass in null for defaults /// unique record identifier diff --git a/AerospikeClientProxy/Proxy/AsyncClientProxy.cs b/AerospikeClientProxy/Proxy/AsyncClientProxy.cs index b40b4cc5..ee041fa9 100644 --- a/AerospikeClientProxy/Proxy/AsyncClientProxy.cs +++ b/AerospikeClientProxy/Proxy/AsyncClientProxy.cs @@ -118,7 +118,7 @@ public AsyncClientProxy(AsyncClientPolicy policy, params Host[] hosts) /// /// Asynchronously attempt to commit the given multi-record transaction. First, the expected /// record versions are sent to the server nodes for verification.If all nodes return success, - /// the transaction is committed.Otherwise, the transaction is aborted. + /// the transaction is committed. Otherwise, the transaction is aborted. ///

    /// This method registers the command with an event loop and returns. /// The event loop thread will process the command and send the results to the listener. @@ -154,7 +154,7 @@ public void Abort(AbortListener listener, Txn txn) ///

    /// Asynchronously write record bin(s). /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -193,7 +193,7 @@ public void Put(WritePolicy policy, WriteListener listener, Key key, params Bin[ /// /// Asynchronously append bin string values to existing record bin values. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call only works for string values. /// @@ -229,7 +229,7 @@ public void Append(WritePolicy policy, WriteListener listener, Key key, params B /// /// Asynchronously prepend bin string values to existing record bin values. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// This call works only for string values. /// @@ -269,7 +269,7 @@ public void Prepend(WritePolicy policy, WriteListener listener, Key key, params /// /// Asynchronously add integer/double bin values to existing record bin values. /// - /// The policy specifies the transaction timeout, record expiration and how the transaction is + /// The policy specifies the command timeout, record expiration and how the command is /// handled when the record already exists. /// /// @@ -1061,7 +1061,7 @@ public void Operate(WritePolicy policy, RecordListener listener, Key key, params /// batch configuration parameters, pass in null for defaults /// cancellation token /// list of unique record identifiers and read/write operations - /// Task with completion status: true if all batch sub-transactions were successful. 
+ /// Task with completion status: true if all batch sub-commands were successful. /// if queue is full public async Task Operate(BatchPolicy policy, CancellationToken token, List records) { diff --git a/AerospikeTest/Async/TestAsync.cs b/AerospikeTest/Async/TestAsync.cs index 1057fa06..a95ab316 100644 --- a/AerospikeTest/Async/TestAsync.cs +++ b/AerospikeTest/Async/TestAsync.cs @@ -17,6 +17,7 @@ using Aerospike.Client; using Aerospike.Client.Proxy; using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; namespace Aerospike.Test { @@ -82,6 +83,30 @@ public bool AssertBinEqual(Key key, Record record, string binName, int expected) return true; } + public bool AssertBatchEqual(Key[] keys, Record[] recs, String binName, int expected) + { + for (int i = 0; i < keys.Length; i++) + { + Key key = keys[i]; + Record rec = recs[i]; + + if (rec == null) + { + monitor.SetError(new Exception("recs[" + i + "] is null")); + return false; + } + + int received = rec.GetInt(binName); + + if (expected != received) + { + monitor.SetError(new Exception("Data mismatch: Expected " + expected + ". 
Received[" + i + "] " + received)); + return false; + } + } + return true; + } + public bool AssertRecordFound(Key key, Record record) { if (record == null) @@ -92,6 +117,16 @@ public bool AssertRecordFound(Key key, Record record) return true; } + public bool AssertRecordNotFound(Key key, Record record) + { + if (record != null) + { + monitor.SetError(new Exception("Record should not exist: namespace=" + args.ns + " set=" + args.set + " key=" + key.userKey)); + return false; + } + return true; + } + public bool AssertBetween(long begin, long end, long value) { if (!(value >= begin && value <= end)) diff --git a/AerospikeTest/Async/TestAsyncTxn.cs b/AerospikeTest/Async/TestAsyncTxn.cs index da53dadc..47319d72 100644 --- a/AerospikeTest/Async/TestAsyncTxn.cs +++ b/AerospikeTest/Async/TestAsyncTxn.cs @@ -18,6 +18,8 @@ using Aerospike.Client; using System.Reflection; using System.Text; +using static Aerospike.Client.CommitStatus; +using static Aerospike.Client.AbortStatus; namespace Aerospike.Test { @@ -42,89 +44,53 @@ public void AsyncTxnWrite() { Key key = new(args.ns, args.set, "asyncTxnWrite"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val2")); + client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); - client.Commit(txn); + client.Commit(new CommitHandler(this), txn); - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); + client.Get(null, new GetExpectHandler(this, "val2"), key); } [TestMethod] public void AsyncTxnWriteTwice() { - Key key = new(args.ns, args.set, "mrtkey2"); + Key key = new(args.ns, args.set, "asyncTxnWriteTwice"); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val1")); - client.Put(wp, key, new Bin(binName, "val2")); + client.Put(wp, new 
PutHandler(this), key, new Bin(binName, "val1")); + client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); - client.Commit(txn); + client.Commit(new CommitHandler(this), txn); - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); - } - - [TestMethod] - public void AsyncTxnWriteConflict() - { - Key key = new(args.ns, args.set, "mrtkey21"); - - Txn txn1 = new(); - Txn txn2 = new(); - - WritePolicy wp1 = client.WritePolicyDefault; - WritePolicy wp2 = client.WritePolicyDefault; - wp1.Txn = txn1; - wp2.Txn = txn2; - - client.Put(wp1, key, new Bin(binName, "val1")); - - try - { - client.Put(wp2, key, new Bin(binName, "val2")); - } - catch (AerospikeException ae) - { - if (ae.Result != ResultCode.MRT_BLOCKED) - { - throw ae; - } - } - - client.Commit(txn1); - client.Commit(txn2); - - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); + client.Get(null, new GetExpectHandler(this, "val2"), key); } [TestMethod] public void AsyncTxnWriteBlock() { - Key key = new(args.ns, args.set, "mrtkey3"); + Key key = new(args.ns, args.set, "asyncTxnWriteBlock"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val2")); + client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); try { // This write should be blocked. 
- client.Put(null, key, new Bin(binName, "val3")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val3")); throw new AerospikeException("Unexpected success"); } catch (AerospikeException e) @@ -135,236 +101,225 @@ public void AsyncTxnWriteBlock() } } - client.Commit(txn); + client.Commit(new CommitHandler(this), txn); } [TestMethod] public void AsyncTxnWriteRead() { - Key key = new(args.ns, args.set, "mrtkey4"); + Key key = new(args.ns, args.set, "asyncTxnWriteRead"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val2")); + client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); + client.Get(null, new GetExpectHandler(this, "val1"), key); - client.Commit(txn); + client.Commit(new CommitHandler(this), txn); - record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); + client.Get(null, new GetExpectHandler(this, "val2"), key); } [TestMethod] public void AsyncTxnWriteAbort() { - Key key = new(args.ns, args.set, "mrtkey5"); + Key key = new(args.ns, args.set, "asyncTxnWriteAbort"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val2")); + client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); Policy p = client.ReadPolicyDefault; p.Txn = txn; - Record record = client.Get(p, key); - AssertBinEqual(key, record, binName, "val2"); + client.Get(p, new GetExpectHandler(this, "val2"), key); - client.Abort(txn); + client.Abort(new AbortHandler(this), txn); - record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); + 
client.Get(null, new GetExpectHandler(this, "val1"), key); } [TestMethod] public void AsyncTxnDelete() { - Key key = new(args.ns, args.set, "mrtkey6"); + Key key = new(args.ns, args.set, "asyncTxnDelete"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; wp.durableDelete = true; - client.Delete(wp, key); + client.Delete(wp, new DeleteHandler(this), key); - client.Commit(txn); + client.Commit(new CommitHandler(this), txn); - Record record = client.Get(null, key); - Assert.IsNull(record); + client.Get(null, new GetExpectHandler(this, null), key); } [TestMethod] public void AsyncTxnDeleteAbort() { - Key key = new(args.ns, args.set, "mrtkey7"); + Key key = new(args.ns, args.set, "asyncTxnDeleteAbort"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; wp.durableDelete = true; - client.Delete(wp, key); + client.Delete(wp, new DeleteHandler(this), key); - client.Abort(txn); + client.Abort(new AbortHandler(this), txn); - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); + client.Get(null, new GetExpectHandler(this, "val1"), key); } [TestMethod] public void AsyncTxnDeleteTwice() { - Key key = new(args.ns, args.set, "mrtkey8"); + Key key = new(args.ns, args.set, "asyncTxnDeleteTwice"); Txn txn = new(); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; wp.durableDelete = true; - client.Delete(wp, key); - client.Delete(wp, key); + client.Delete(wp, new DeleteHandler(this), key); + client.Delete(wp, new DeleteHandler(this), key); - client.Commit(txn); + client.Commit(new CommitHandler(this), txn); - Record record = 
client.Get(null, key); - Assert.IsNull(record); + client.Get(null, new GetExpectHandler(this, null), key); } [TestMethod] public void AsyncTxnTouch() { - Key key = new(args.ns, args.set, "mrtkey9"); + Key key = new(args.ns, args.set, "asyncTxnTouch"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - client.Touch(wp, key); + client.Touch(wp, new TouchHandler(this), key); - client.Commit(txn); + client.Commit(new CommitHandler(this), txn); - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); + client.Get(null, new GetExpectHandler(this, "val1"), key); } [TestMethod] public void AsyncTxnTouchAbort() { - Key key = new(args.ns, args.set, "mrtkey10"); + Key key = new(args.ns, args.set, "asyncTxnTouchAbort"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - client.Touch(wp, key); + client.Touch(wp, new TouchHandler(this), key); - client.Abort(txn); + client.Abort(new AbortHandler(this), txn); - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); + client.Get(null, new GetExpectHandler(this, "val1"), key); } [TestMethod] public void AsyncTxnOperateWrite() { - Key key = new(args.ns, args.set, "mrtkey11"); + Key key = new(args.ns, args.set, "asyncTxnOperateWrite3"); + Bin bin2 = new("bin2", "bal1"); - client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1"), bin2); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - Record record = client.Operate(wp, key, + client.Operate(wp, new OperateExpectHandler(this, bin2), key, Operation.Put(new Bin(binName, "val2")), Operation.Get("bin2") ); - 
AssertBinEqual(key, record, "bin2", "bal1"); - client.Commit(txn); + client.Commit(new CommitHandler(this), txn); - record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); + client.Get(null, new GetExpectHandler(this, "val2"), key); } [TestMethod] public void AsyncTxnOperateWriteAbort() { - Key key = new(args.ns, args.set, "mrtkey12"); + Key key = new(args.ns, args.set, "asyncTxnOperateWriteAbort"); + Bin bin2 = new("bin2", "bal1"); - client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1"), bin2); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - Record record = client.Operate(wp, key, + client.Operate(wp, new OperateExpectHandler(this, bin2), key, Operation.Put(new Bin(binName, "val2")), - Operation.Get("bin2") + Operation.Get(bin2.name) ); - AssertBinEqual(key, record, "bin2", "bal1"); - client.Abort(txn); + client.Abort(new AbortHandler(this), txn); - record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); + client.Get(null, new GetExpectHandler(this, "val1"), key); } [TestMethod] public void AsyncTxnUDF() { - Key key = new(args.ns, args.set, "mrtkey13"); + Key key = new(args.ns, args.set, "asyncTxnUDF"); + Bin bin2 = new("bin2", "bal1"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1"), bin2); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + client.Execute(wp, new UDFHandler(this), key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); - client.Commit(txn); + client.Commit(new CommitHandler(this), txn); - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); + client.Get(null, new GetExpectHandler(this, "val2"), key); } [TestMethod] public void 
AsyncTxnUDFAbort() { - Key key = new(args.ns, args.set, "mrtkey14"); + Key key = new(args.ns, args.set, "asyncTxnUDFAbort"); + Bin bin2 = new("bin2", "bal1"); - client.Put(null, key, new Bin(binName, "val1")); + client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); Txn txn = new(); WritePolicy wp = client.WritePolicyDefault; wp.Txn = txn; - client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + client.Execute(wp, new UDFHandler(this), key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); - client.Abort(txn); + client.Abort(new AbortHandler(this), txn); - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); + client.Get(null, new GetExpectHandler(this, "val1"), key); } [TestMethod] @@ -375,14 +330,13 @@ public void AsyncTxnBatch() for (int i = 0; i < keys.Length; i++) { - Key key = new(args.ns, args.set, i); + Key key = new(args.ns, args.set, "asyncTxnBatch" + i); keys[i] = key; client.Put(null, key, bin); } - Record[] recs = client.Get(null, keys); - AssertBatchEqual(keys, recs, 1); + client.Get(null, new BatchGetExpectHandler(this, 1), keys); Txn txn = new(); @@ -391,34 +345,11 @@ public void AsyncTxnBatch() BatchPolicy bp = BatchPolicy.WriteDefault(); bp.Txn = txn; - BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + client.Operate(bp, null, new BatchOperateHandler(this), keys, Operation.Put(bin)); - if (!bresults.status) - { - StringBuilder sb = new StringBuilder(); - sb.Append("Batch failed:"); - sb.Append(System.Environment.NewLine); + client.Commit(new CommitHandler(this), txn); - foreach (BatchRecord br in bresults.records) - { - if (br.resultCode == 0) - { - sb.Append("Record: " + br.record); - } - else - { - sb.Append("ResultCode: " + br.resultCode); - } - sb.Append(System.Environment.NewLine); - } - - throw new AerospikeException(sb.ToString()); - } - - client.Commit(txn); - - recs = client.Get(null, keys); - 
AssertBatchEqual(keys, recs, 2); + client.Get(null, new BatchGetExpectHandler(this, 1), keys); } [TestMethod] @@ -429,14 +360,13 @@ public void AsyncTxnBatchAbort() for (int i = 0; i < keys.Length; i++) { - Key key = new(args.ns, args.set, i); + Key key = new(args.ns, args.set, "asyncTxnBatch" + i); keys[i] = key; client.Put(null, key, bin); } - Record[] recs = client.Get(null, keys); - AssertBatchEqual(keys, recs, 1); + client.Get(null, new BatchGetExpectHandler(this, 1), keys); Txn txn = new(); @@ -445,47 +375,299 @@ public void AsyncTxnBatchAbort() BatchPolicy bp = BatchPolicy.WriteDefault(); bp.Txn = txn; - BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + client.Operate(bp, null, new BatchOperateHandler(this), keys, Operation.Put(bin)); + + client.Abort(new AbortHandler(this), txn); + + client.Get(null, new BatchGetExpectHandler(this, 1), keys); + } + + private class CommitHandler : CommitListener + { + private readonly TestAsyncTxn parent; + + public CommitHandler(TestAsyncTxn parent) + { + this.parent = parent; + } + + public void OnSuccess(CommitStatusType status) + { + parent.NotifyCompleted(); + } + + public void OnFailure(AerospikeException.Commit e) + { + parent.SetError(e); + parent.NotifyCompleted(); + } + } + + private class AbortHandler : AbortListener + { + private readonly TestAsyncTxn parent; + + public AbortHandler(TestAsyncTxn parent) + { + this.parent = parent; + } + + public void OnSuccess(AbortStatusType status) + { + parent.NotifyCompleted(); + } - if (!bresults.status) + public void OnFailure(AerospikeException e) { - StringBuilder sb = new StringBuilder(); - sb.Append("Batch failed:"); - sb.Append(System.Environment.NewLine); + parent.SetError(e); + parent.NotifyCompleted(); + } + } + + private class PutHandler : WriteListener + { + private readonly TestAsyncTxn parent; - foreach (BatchRecord br in bresults.records) + public PutHandler(TestAsyncTxn parent) + { + this.parent = parent; + } + + public void 
OnSuccess(Key key) + { + parent.NotifyCompleted(); + } + + public void OnFailure(AerospikeException e) + { + parent.SetError(e); + parent.NotifyCompleted(); + } + } + + private class GetExpectHandler : RecordListener + { + private readonly TestAsyncTxn parent; + private string expect; + + public GetExpectHandler(TestAsyncTxn parent, string expect) + { + this.parent = parent; + this.expect = expect; + } + + public void OnSuccess(Key key, Record record) + { + if (expect != null) { - if (br.resultCode == 0) + if (parent.AssertBinEqual(key, record, binName, expect)) { - sb.Append("Record: " + br.record); + parent.NotifyCompleted(); } else { - sb.Append("ResultCode: " + br.resultCode); + parent.NotifyCompleted(); } + } + else + { + if (parent.AssertRecordNotFound(key, record)) + { + parent.NotifyCompleted(); + } + else + { + parent.NotifyCompleted(); + } + } + } + + public void OnFailure(AerospikeException e) + { + parent.SetError(e); + parent.NotifyCompleted(); + } + } + + private class OperateExpectHandler : RecordListener + { + private readonly TestAsyncTxn parent; + private Bin? expect; + + public OperateExpectHandler(TestAsyncTxn parent, Bin? 
expect) + { + this.parent = parent; + this.expect = expect; + } + + public void OnSuccess(Key key, Record record) + { + if (expect != null) + { + if (parent.AssertBinEqual(key, record, expect?.name, expect?.value.Object)) + { + parent.NotifyCompleted(); + } + else + { + parent.NotifyCompleted(); + } + } + else + { + if (parent.AssertRecordNotFound(key, record)) + { + parent.NotifyCompleted(); + } + else + { + parent.NotifyCompleted(); + } + } + } + + public void OnFailure(AerospikeException e) + { + parent.SetError(e); + parent.NotifyCompleted(); + } + } + + private class UDFHandler : ExecuteListener + { + private readonly TestAsyncTxn parent; + + public UDFHandler(TestAsyncTxn parent) + { + this.parent = parent; + } + + public void OnSuccess(Key key, Object obj) + { + parent.NotifyCompleted(); + } + + public void OnFailure(AerospikeException e) + { + parent.SetError(e); + parent.NotifyCompleted(); + } + } + + private class BatchGetExpectHandler : RecordArrayListener + { + private readonly TestAsyncTxn parent; + private readonly int expected; + + public BatchGetExpectHandler(TestAsyncTxn parent, int expected) + { + this.parent = parent; + this.expected = expected; + } + + public void OnSuccess(Key[] keys, Record[] records) + { + if (parent.AssertBatchEqual(keys, records, binName, expected)) + { + parent.NotifyCompleted(); + } + else + { + parent.NotifyCompleted(); + } + } + + public void OnFailure(AerospikeException e) + { + parent.SetError(e); + parent.NotifyCompleted(); + } + } + + private class BatchOperateHandler : BatchRecordArrayListener + { + private TestAsyncTxn parent; + + public BatchOperateHandler(TestAsyncTxn parent) + { + this.parent = parent; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + if (status) + { + parent.NotifyCompleted(); + } + else + { + StringBuilder sb = new StringBuilder(); + sb.Append("Batch failed:"); sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in records) + { + if (br.resultCode == 0) + 
{ + sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + parent.SetError(new AerospikeException(sb.ToString())); + parent.NotifyCompleted(); } + } - throw new AerospikeException(sb.ToString()); + public void OnFailure(BatchRecord[] records, AerospikeException e) + { + parent.SetError(e); + parent.NotifyCompleted(); } + } - client.Abort(txn); + private class TouchHandler : WriteListener + { + private TestAsyncTxn parent; - recs = client.Get(null, keys); - AssertBatchEqual(keys, recs, 1); + public TouchHandler(TestAsyncTxn parent) + { + this.parent = parent; + } + + public void OnSuccess(Key key) + { + parent.NotifyCompleted(); + } + + public void OnFailure(AerospikeException e) + { + parent.SetError(e); + parent.NotifyCompleted(); + } } - private void AsyncAssertBatchEqual(Key[] keys, Record[] recs, int expected) + private class DeleteHandler : DeleteListener { - for (int i = 0; i < keys.Length; i++) + private TestAsyncTxn parent; + + public DeleteHandler(TestAsyncTxn parent) { - Key key = keys[i]; - Record rec = recs[i]; + this.parent = parent; + } - Assert.IsNotNull(rec); + public void OnSuccess(Key key, bool existed) + { + parent.NotifyCompleted(); + } - int received = rec.GetInt(binName); - Assert.AreEqual(expected, received); + public void OnFailure(AerospikeException e) + { + parent.SetError(e); + parent.NotifyCompleted(); } } } diff --git a/AerospikeTest/settings.json b/AerospikeTest/settings.json index 7ca33c77..24dbc27b 100644 --- a/AerospikeTest/settings.json +++ b/AerospikeTest/settings.json @@ -7,8 +7,8 @@ "ClusterName": "", "Namespace": "test", "Set": "test", - "User": "", - "Password": "", + "User": "charlie", + "Password": "123456", "Timeout": 25000, "UseServicesAlternate": true, "TlsEnable": false, From bc59a5624321307e6174d3f370a90a40a069b745 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Wed, 11 Sep 2024 11:27:00 -0600 Subject: [PATCH 05/41] Tests 
working --- AerospikeClient/Async/AsyncBatch.cs | 4356 +++++------ AerospikeClient/Async/AsyncDelete.cs | 185 +- AerospikeClient/Async/AsyncMultiCommand.cs | 279 +- AerospikeClient/Async/AsyncSingleCommand.cs | 179 +- AerospikeClient/Async/AsyncTouch.cs | 167 +- AerospikeClient/Async/AsyncTxnClose.cs | 177 +- .../Async/AsyncTxnMarkRollForward.cs | 177 +- AerospikeClient/Async/AsyncTxnMonitor.cs | 408 +- AerospikeClient/Async/AsyncTxnRoll.cs | 911 +-- AerospikeClient/Async/AsyncWrite.cs | 195 +- AerospikeClient/Command/Batch.cs | 1684 ++--- AerospikeClient/Command/BatchExecutor.cs | 337 +- AerospikeClient/Command/Command.cs | 6511 +++++++++-------- AerospikeClient/Command/DeleteCommand.cs | 142 +- AerospikeClient/Command/SyncCommand.cs | 961 ++- AerospikeClient/Command/TouchCommand.cs | 107 +- AerospikeClient/Command/TxnClose.cs | 103 +- AerospikeClient/Command/TxnMarkRollForward.cs | 104 +- AerospikeClient/Command/TxnRoll.cs | 532 +- AerospikeClient/Command/WriteCommand.cs | 117 +- AerospikeClient/Main/AerospikeClient.cs | 5352 +++++++------- AerospikeClient/Main/Txn.cs | 459 +- AerospikeTest/Args.cs | 858 ++- AerospikeTest/Async/TestAsyncTxn.cs | 1672 +++-- AerospikeTest/Sync/Basic/TestBatch.cs | 996 +-- AerospikeTest/Sync/Basic/TestTxn.cs | 984 +-- AerospikeTest/settings.json | 54 +- 27 files changed, 14202 insertions(+), 13805 deletions(-) diff --git a/AerospikeClient/Async/AsyncBatch.cs b/AerospikeClient/Async/AsyncBatch.cs index 1d1a808e..4fae2e41 100644 --- a/AerospikeClient/Async/AsyncBatch.cs +++ b/AerospikeClient/Async/AsyncBatch.cs @@ -1,2178 +1,2178 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -namespace Aerospike.Client -{ - //------------------------------------------------------- - // ReadList - //------------------------------------------------------- - - public sealed class AsyncBatchReadListExecutor : AsyncBatchExecutor - { - private readonly BatchListListener listener; - private readonly List records; - - public AsyncBatchReadListExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchListListener listener, - List records - ) : base(cluster, true) - { - this.listener = listener; - this.records = records; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchReadListCommand(this, cluster, batchNode, policy, records); - } - this.commands = commands; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(records); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchReadListCommand : AsyncBatchCommand - { - private readonly List records; - - public AsyncBatchReadListCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - List records - ) : base(parent, cluster, batch, batchPolicy, true) - { - this.records = records; - } - - public AsyncBatchReadListCommand(AsyncBatchReadListCommand other) : base(other) - { - this.records = other.records; - } 
- - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - SetBatchOperate(batchPolicy, records, batch); - } - else - { - SetBatchRead(batchPolicy, records, batch); - } - } - - protected internal override void ParseRow() - { - BatchRead record = records[batchIndex]; - - ParseFieldsRead(record.key); - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else - { - record.SetError(resultCode, false); - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchReadListCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchReadListCommand(parent, cluster, batchNode, batchPolicy, records); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); - } - } - - //------------------------------------------------------- - // ReadSequence - //------------------------------------------------------- - - public sealed class AsyncBatchReadSequenceExecutor : AsyncBatchExecutor - { - private readonly BatchSequenceListener listener; - - public AsyncBatchReadSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchSequenceListener listener, - List records - ) : base(cluster, true) - { - this.listener = listener; - - // Create commands. 
- List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchReadSequenceCommand(this, cluster, batchNode, policy, listener, records); - } - this.commands = commands; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchReadSequenceCommand : AsyncBatchCommand - { - private readonly BatchSequenceListener listener; - private readonly List records; - - public AsyncBatchReadSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - BatchSequenceListener listener, - List records - ) : base(parent, cluster, batch, batchPolicy, true) - { - this.listener = listener; - this.records = records; - } - - public AsyncBatchReadSequenceCommand(AsyncBatchReadSequenceCommand other) : base(other) - { - this.listener = other.listener; - this.records = other.records; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - SetBatchOperate(batchPolicy, records, batch); - } - else - { - SetBatchRead(batchPolicy, records, batch); - } - } - - protected internal override void ParseRow() - { - BatchRead record = records[batchIndex]; - - ParseFieldsRead(record.key); - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else - { - record.SetError(resultCode, false); - } - listener.OnRecord(record); - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchReadSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchReadSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records); - } - - internal override 
List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); - } - } - - //------------------------------------------------------- - // GetArray - //------------------------------------------------------- - - public sealed class AsyncBatchGetArrayExecutor : AsyncBatchExecutor - { - private readonly Key[] keys; - private readonly Record[] records; - private readonly RecordArrayListener listener; - - public AsyncBatchGetArrayExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - RecordArrayListener listener, - Key[] keys, - string[] binNames, - Operation[] ops, - int readAttr, - bool isOperation - ) : base(cluster, false) - { - this.keys = keys; - this.records = new Record[keys.Length]; - this.listener = listener; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchGetArrayCommand(this, cluster, batchNode, policy, keys, binNames, ops, records, readAttr, isOperation); - } - this.commands = commands; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(keys, records); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(new AerospikeException.BatchRecords(records, ae)); - } - } - - sealed class AsyncBatchGetArrayCommand : AsyncBatchCommand - { - private readonly Key[] keys; - private readonly string[] binNames; - private readonly Operation[] ops; - private readonly Record[] records; - private readonly int readAttr; - - public AsyncBatchGetArrayCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string[] binNames, - Operation[] ops, - Record[] records, - int readAttr, - bool isOperation - ) : base(parent, cluster, 
batch, batchPolicy, isOperation) - { - this.keys = keys; - this.binNames = binNames; - this.ops = ops; - this.records = records; - this.readAttr = readAttr; - } - - public AsyncBatchGetArrayCommand(AsyncBatchGetArrayCommand other) : base(other) - { - this.keys = other.keys; - this.binNames = other.binNames; - this.ops = other.ops; - this.records = other.records; - this.readAttr = other.readAttr; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - BatchAttr attr = new(batchPolicy, readAttr, ops); - SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); - } - } - - protected internal override void ParseRow() - { - ParseFieldsRead(keys[batchIndex]); - - if (resultCode == 0) - { - records[batchIndex] = ParseRecord(); - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchGetArrayCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchGetArrayCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); - } - } - - //------------------------------------------------------- - // GetSequence - //------------------------------------------------------- - - public sealed class AsyncBatchGetSequenceExecutor : AsyncBatchExecutor - { - private readonly RecordSequenceListener listener; - - public AsyncBatchGetSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - RecordSequenceListener listener, - Key[] keys, - string[] binNames, - Operation[] ops, - int readAttr, - bool isOperation - ) : base(cluster, false) - { - this.listener = listener; - - // Create commands. 
- List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchGetSequenceCommand(this, cluster, batchNode, policy, keys, binNames, ops, listener, readAttr, isOperation); - } - this.commands = commands; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchGetSequenceCommand : AsyncBatchCommand - { - private readonly Key[] keys; - private readonly string[] binNames; - private readonly Operation[] ops; - private readonly RecordSequenceListener listener; - private readonly int readAttr; - - public AsyncBatchGetSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string[] binNames, - Operation[] ops, - RecordSequenceListener listener, - int readAttr, - bool isOperation - ) : base(parent, cluster, batch, batchPolicy, isOperation) - { - this.keys = keys; - this.binNames = binNames; - this.ops = ops; - this.listener = listener; - this.readAttr = readAttr; - } - - public AsyncBatchGetSequenceCommand(AsyncBatchGetSequenceCommand other) : base(other) - { - this.keys = other.keys; - this.binNames = other.binNames; - this.ops = other.ops; - this.listener = other.listener; - this.readAttr = other.readAttr; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - BatchAttr attr = new(batchPolicy, readAttr, ops); - SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); - } - } - - protected internal override void ParseRow() - { - Key keyOrig = keys[batchIndex]; - - ParseFieldsRead(keyOrig); - - if (resultCode 
== 0) - { - Record record = ParseRecord(); - listener.OnRecord(keyOrig, record); - } - else - { - listener.OnRecord(keyOrig, null); - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchGetSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchGetSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, listener, readAttr, isOperation); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); - } - } - - //------------------------------------------------------- - // ExistsArray - //------------------------------------------------------- - - public sealed class AsyncBatchExistsArrayExecutor : AsyncBatchExecutor - { - private readonly Key[] keys; - private readonly bool[] existsArray; - private readonly ExistsArrayListener listener; - - public AsyncBatchExistsArrayExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - Key[] keys, - ExistsArrayListener listener - ) : base(cluster,false) - { - this.keys = keys; - this.existsArray = new bool[keys.Length]; - this.listener = listener; - - // Create commands. 
- List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchExistsArrayCommand(this, cluster, batchNode, policy, keys, existsArray); - } - this.commands = commands; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(keys, existsArray); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(new AerospikeException.BatchExists(existsArray, ae)); - } - } - - sealed class AsyncBatchExistsArrayCommand : AsyncBatchCommand - { - private readonly Key[] keys; - private readonly bool[] existsArray; - - public AsyncBatchExistsArrayCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - bool[] existsArray - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.keys = keys; - this.existsArray = existsArray; - } - - public AsyncBatchExistsArrayCommand(AsyncBatchExistsArrayCommand other) : base(other) - { - this.keys = other.keys; - this.existsArray = other.existsArray; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - BatchAttr attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); - SetBatchOperate(batchPolicy, keys, batch, null, null, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); - } - } - - protected internal override void ParseRow() - { - ParseFieldsRead(keys[batchIndex]); - existsArray[batchIndex] = resultCode == 0; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchExistsArrayCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchExistsArrayCommand(parent, cluster, batchNode, 
batchPolicy, keys, existsArray); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); - } - } - - //------------------------------------------------------- - // ExistsSequence - //------------------------------------------------------- - - public sealed class AsyncBatchExistsSequenceExecutor : AsyncBatchExecutor - { - private readonly ExistsSequenceListener listener; - - public AsyncBatchExistsSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - Key[] keys, - ExistsSequenceListener listener - ) : base(cluster, false) - { - this.listener = listener; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); - AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNode, policy, keys, listener); - } - this.commands = commands; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - - } - - sealed class AsyncBatchExistsSequenceCommand : AsyncBatchCommand - { - private readonly Key[] keys; - private readonly ExistsSequenceListener listener; - - public AsyncBatchExistsSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - ExistsSequenceListener listener - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.keys = keys; - this.listener = listener; - } - - public AsyncBatchExistsSequenceCommand(AsyncBatchExistsSequenceCommand other) : base(other) - { - this.keys = other.keys; - this.listener = other.listener; - } - - protected internal override void WriteBuffer() - { - if (batch.node.HasBatchAny) - { - BatchAttr 
attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); - SetBatchOperate(batchPolicy, keys, batch, null, null, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); - } - } - - protected internal override void ParseRow() - { - Key keyOrig = keys[batchIndex]; - ParseFieldsRead(keyOrig); - listener.OnExists(keyOrig, resultCode == 0); - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchExistsSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchExistsSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, listener); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); - } - } - - //------------------------------------------------------- - // OperateList - //------------------------------------------------------- - - public sealed class AsyncBatchOperateListExecutor : AsyncBatchExecutor - { - internal readonly BatchOperateListListener listener; - internal readonly List records; - - public AsyncBatchOperateListExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchOperateListListener listener, - List records - ) : base(cluster, true) - { - this.listener = listener; - this.records = records; - - // Create commands. 
- List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchOperateListCommand(this, cluster, batchNode, policy, records); - } - this.commands = tasks; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(records, GetStatus()); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchOperateListCommand : AsyncBatchCommand - { - internal readonly List records; - - public AsyncBatchOperateListCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - List records - ) : base(parent, cluster, batch, batchPolicy, true) - { - this.records = records; - } - - public AsyncBatchOperateListCommand(AsyncBatchOperateListCommand other) : base(other) - { - this.records = other.records; - } - - protected internal override bool IsWrite() - { - // This method is only called to set inDoubt on node level errors. - // SetError() will filter out reads when setting record level inDoubt. - return true; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, records, batch); - } - - protected internal override void ParseRow() - { - BatchRecord record = records[batchIndex]; - - ParseFields(record.key, record.hasWrite); - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - return; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. 
- record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); - parent.SetRowError(); - return; - } - } - - record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); - parent.SetRowError(); - } - - internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = record.hasWrite; - - if (record.inDoubt && policy.Txn != null) - { - policy.Txn.OnWriteInDoubt(record.key); - } - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchOperateListCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchOperateListCommand(parent, cluster, batchNode, batchPolicy, records); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); - } - } - - //------------------------------------------------------- - // OperateSequence - //------------------------------------------------------- - - public sealed class AsyncBatchOperateSequenceExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordSequenceListener listener; - - public AsyncBatchOperateSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordSequenceListener listener, - List records - ) : base(cluster, true) - { - this.listener = listener; - - // Create commands. 
- List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchOperateSequenceCommand(this, cluster, batchNode, policy, listener, records); - } - this.commands = tasks; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchOperateSequenceCommand : AsyncBatchCommand - { - internal readonly BatchRecordSequenceListener listener; - internal readonly List records; - - public AsyncBatchOperateSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - BatchRecordSequenceListener listener, - List records - ) : base(parent, cluster, batch, batchPolicy, true) - { - this.listener = listener; - this.records = records; - } - - public AsyncBatchOperateSequenceCommand(AsyncBatchOperateSequenceCommand other) : base(other) - { - this.listener = other.listener; - this.records = other.records; - } - - protected internal override bool IsWrite() - { - // This method is only called to set inDoubt on node level errors. - // SetError() will filter out reads when setting record level inDoubt. - return true; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, records, batch); - } - - protected internal override void ParseRow() - { - BatchRecord record = records[batchIndex]; - - ParseFields(record.key, record.hasWrite); - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. 
- record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); - } - else - { - record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); - } - } - else - { - record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); - } - AsyncBatch.OnRecord(cluster, listener, record, batchIndex); - } - - internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - // Set inDoubt, but do not call OnRecord() because user already has access to full - // BatchRecord list and can examine each record for inDoubt when the exception occurs. - record.inDoubt = record.hasWrite; - - if (record.inDoubt && policy.Txn != null) - { - policy.Txn.OnWriteInDoubt(record.key); - } - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchOperateSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchOperateSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); - } - } - - //------------------------------------------------------- - // OperateRecordArray - //------------------------------------------------------- - - public sealed class AsyncBatchOperateRecordArrayExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordArrayListener listener; - internal readonly BatchRecord[] records; - - public AsyncBatchOperateRecordArrayExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordArrayListener listener, - Key[] keys, - Operation[] ops, - BatchAttr attr - ) : base(cluster, true) - { - this.listener 
= listener; - this.records = new BatchRecord[keys.Length]; - - for (int i = 0; i < keys.Length; i++) - { - this.records[i] = new BatchRecord(keys[i], attr.hasWrite); - } - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, attr.hasWrite, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchOperateRecordArrayCommand(this, cluster, batchNode, policy, keys, ops, records, attr); - } - this.commands = tasks; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(records, GetStatus()); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(records, ae); - } - } - - sealed class AsyncBatchOperateRecordArrayCommand : AsyncBatchCommand - { - internal readonly Key[] keys; - internal readonly Operation[] ops; - internal readonly BatchRecord[] records; - internal readonly BatchAttr attr; - - public AsyncBatchOperateRecordArrayCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - Operation[] ops, - BatchRecord[] records, - BatchAttr attr - ) : base(parent, cluster, batch, batchPolicy, ops != null) - { - this.keys = keys; - this.ops = ops; - this.records = records; - this.attr = attr; - } - - public AsyncBatchOperateRecordArrayCommand(AsyncBatchOperateRecordArrayCommand other) : base(other) - { - this.keys = other.keys; - this.ops = other.ops; - this.records = other.records; - this.attr = other.attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, keys, batch, null, ops, attr); - } - - protected internal override void ParseRow() - { - BatchRecord record = records[batchIndex]; - - ParseFields(record.key, record.hasWrite); - - if (resultCode == 0) - { - 
record.SetRecord(ParseRecord()); - } - else - { - record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - parent.SetRowError(); - } - } - - internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt || !attr.hasWrite) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = true; - - policy.Txn?.OnWriteInDoubt(record.key); - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchOperateRecordArrayCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchOperateRecordArrayCommand(parent, cluster, batchNode, batchPolicy, keys, ops, records, attr); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); - } - } - - //------------------------------------------------------- - // OperateRecordSequence - //------------------------------------------------------- - - public sealed class AsyncBatchOperateRecordSequenceExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordSequenceListener listener; - private readonly bool[] sent; - - public AsyncBatchOperateRecordSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordSequenceListener listener, - Key[] keys, - Operation[] ops, - BatchAttr attr - ) : base(cluster, true) - { - this.listener = listener; - this.sent = new bool[keys.Length]; - - // Create commands. 
- List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchOperateRecordSequenceCommand(this, cluster, batchNode, policy, keys, ops, sent, listener, attr); - } - this.commands = tasks; - } - - public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) - { - BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite); - sent[index] = true; - AsyncBatch.OnRecord(cluster, listener, record, index); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchOperateRecordSequenceCommand : AsyncBatchCommand - { - internal readonly Key[] keys; - internal readonly Operation[] ops; - internal readonly bool[] sent; - internal readonly BatchRecordSequenceListener listener; - internal readonly BatchAttr attr; - - public AsyncBatchOperateRecordSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - Operation[] ops, - bool[] sent, - BatchRecordSequenceListener listener, - BatchAttr attr - ) : base(parent, cluster, batch, batchPolicy, ops != null) - { - this.keys = keys; - this.ops = ops; - this.sent = sent; - this.listener = listener; - this.attr = attr; - } - - public AsyncBatchOperateRecordSequenceCommand(AsyncBatchOperateRecordSequenceCommand other) : base(other) - { - this.keys = other.keys; - this.ops = other.ops; - this.sent = other.sent; - this.listener = other.listener; - this.attr = other.attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, 
keys, batch, null, ops, attr); - } - - protected internal override void ParseRow() - { - Key keyOrig = keys[batchIndex]; - - ParseFields(keyOrig, attr.hasWrite); - - BatchRecord record; - - if (resultCode == 0) - { - record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite); - } - else - { - record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); - } - sent[batchIndex] = true; - AsyncBatch.OnRecord(cluster, listener, record, batchIndex); - } - - internal override void SetInDoubt(bool inDoubt) - { - // Set inDoubt for all unsent records, so the listener receives a full set of records. - foreach (int index in batch.offsets) - { - if (!sent[index]) - { - Key key = keys[index]; - BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); - sent[index] = true; - - if (record.inDoubt && policy.Txn != null) - { - policy.Txn.OnWriteInDoubt(key); - } - - AsyncBatch.OnRecord(cluster, listener, record, index); - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchOperateRecordSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchOperateRecordSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, ops, sent, listener, attr); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); - } - } - - //------------------------------------------------------- - // UDFArray - //------------------------------------------------------- - - public sealed class AsyncBatchUDFArrayExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordArrayListener listener; - internal readonly BatchRecord[] recordArray; - - public AsyncBatchUDFArrayExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordArrayListener listener, 
- Key[] keys, - string packageName, - string functionName, - byte[] argBytes, - BatchAttr attr - ) : base(cluster, true) - { - this.listener = listener; - this.recordArray = new BatchRecord[keys.Length]; - - for (int i = 0; i < keys.Length; i++) - { - this.recordArray[i] = new BatchRecord(keys[i], attr.hasWrite); - } - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, recordArray, attr.hasWrite, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchUDFArrayCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, recordArray, attr); - } - this.commands = tasks; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(recordArray, GetStatus()); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(recordArray, ae); - } - } - - public sealed class AsyncBatchUDFArrayCommand : AsyncBatchCommand - { - internal readonly Key[] keys; - internal readonly string packageName; - internal readonly string functionName; - internal readonly byte[] argBytes; - internal readonly BatchRecord[] records; - internal readonly BatchAttr attr; - - public AsyncBatchUDFArrayCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string packageName, - string functionName, - byte[] argBytes, - BatchRecord[] records, - BatchAttr attr - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.keys = keys; - this.packageName = packageName; - this.functionName = functionName; - this.argBytes = argBytes; - this.records = records; - this.attr = attr; - } - - public AsyncBatchUDFArrayCommand(AsyncBatchUDFArrayCommand other) : base(other) - { - this.keys = other.keys; - this.packageName = other.packageName; - this.functionName = other.functionName; - this.argBytes = 
other.argBytes; - this.records = other.records; - this.attr = other.attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); - } - - protected internal override void ParseRow() - { - BatchRecord record = records[batchIndex]; - - ParseFields(record.key, record.hasWrite); - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - return; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. - record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter); - parent.SetRowError(); - return; - } - } - - record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - parent.SetRowError(); - } - - internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt || !attr.hasWrite) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = true; - - policy.Txn?.OnWriteInDoubt(record.key); - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchUDFArrayCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchUDFArrayCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); - } - } - - //------------------------------------------------------- - // UDFSequence - 
//------------------------------------------------------- - - public sealed class AsyncBatchUDFSequenceExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordSequenceListener listener; - private readonly bool[] sent; - - public AsyncBatchUDFSequenceExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordSequenceListener listener, - Key[] keys, - string packageName, - string functionName, - byte[] argBytes, - BatchAttr attr - ) : base(cluster, true) - { - this.listener = listener; - this.sent = new bool[keys.Length]; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchUDFSequenceCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, sent, listener, attr); - } - this.commands = tasks; - } - - public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) - { - BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite); - sent[index] = true; - AsyncBatch.OnRecord(cluster, listener, record, index); - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(ae); - } - } - - sealed class AsyncBatchUDFSequenceCommand : AsyncBatchCommand - { - internal readonly Key[] keys; - internal readonly string packageName; - internal readonly string functionName; - internal readonly byte[] argBytes; - internal readonly bool[] sent; - internal readonly BatchRecordSequenceListener listener; - internal readonly BatchAttr attr; - - public AsyncBatchUDFSequenceCommand - ( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string packageName, - string 
functionName, - byte[] argBytes, - bool[] sent, - BatchRecordSequenceListener listener, - BatchAttr attr - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.keys = keys; - this.packageName = packageName; - this.functionName = functionName; - this.argBytes = argBytes; - this.sent = sent; - this.listener = listener; - this.attr = attr; - } - - public AsyncBatchUDFSequenceCommand(AsyncBatchUDFSequenceCommand other) : base(other) - { - this.keys = other.keys; - this.packageName = other.packageName; - this.functionName = other.functionName; - this.argBytes = other.argBytes; - this.sent = other.sent; - this.listener = other.listener; - this.attr = other.attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); - } - - protected internal override void ParseRow() - { - Key keyOrig = keys[batchIndex]; - - ParseFields(keyOrig, attr.hasWrite); - - BatchRecord record; - - if (resultCode == 0) - { - record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite); - } - else if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. 
- record = new BatchRecord(keyOrig, r, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); - } - else - { - record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); - } - } - else - { - record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); - } - sent[batchIndex] = true; - AsyncBatch.OnRecord(cluster, listener, record, batchIndex); - } - - internal override void SetInDoubt(bool inDoubt) - { - // Set inDoubt for all unsent records, so the listener receives a full set of records. - foreach (int index in batch.offsets) - { - if (!sent[index]) - { - Key key = keys[index]; - BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); - sent[index] = true; - - if (record.inDoubt && policy.Txn != null) - { - policy.Txn.OnWriteInDoubt(record.key); - } - - AsyncBatch.OnRecord(cluster, listener, record, index); - } - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchUDFSequenceCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchUDFSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, sent, listener, attr); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); - } - } - - //------------------------------------------------------- - // MRT - //------------------------------------------------------- - - public sealed class AsyncBatchTxnVerifyExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordArrayListener listener; - private readonly BatchRecord[] records; - - public AsyncBatchTxnVerifyExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordArrayListener listener, 
- Key[] keys, - long[] versions, - BatchRecord[] records - ) : base(cluster, true) - { - this.listener = listener; - this.records = records; - - // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, false, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchTxnVerifyCommand(this, cluster, batchNode, policy, policy.Txn, keys, versions, records); - } - this.commands = tasks; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(records, GetStatus()); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(records, ae); - } - } - - sealed class AsyncBatchTxnVerifyCommand : AsyncBatchCommand - { - private Txn txn; - private Key[] keys; - private long[] versions; - private BatchRecord[] records; - - public AsyncBatchTxnVerifyCommand( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Txn txn, - Key[] keys, - long[] versions, // TODO does this need to be long? 
- BatchRecord[] records - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.txn = txn; - this.keys = keys; - this.versions = versions; - this.records = records; - } - - public AsyncBatchTxnVerifyCommand(AsyncBatchTxnVerifyCommand other) : base(other) - { - this.txn = other.txn; - this.keys = other.keys; - this.versions = other.versions; - this.records = other.records; - } - - protected internal override void WriteBuffer() - { - SetBatchTxnVerify(batchPolicy, txn, keys, versions, batch); - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if (resultCode == ResultCode.OK) - { - record.resultCode = resultCode; - } - else - { - record.SetError(resultCode, false); - parent.SetRowError(); - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchTxnVerifyCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchTxnVerifyCommand(parent, cluster, batchNode, batchPolicy, txn, keys, versions, records); - } - - internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); - } - } - - public sealed class AsyncBatchTxnRollExecutor : AsyncBatchExecutor - { - internal readonly BatchRecordArrayListener listener; - private readonly BatchRecord[] records; - - public AsyncBatchTxnRollExecutor - ( - AsyncCluster cluster, - BatchPolicy policy, - BatchRecordArrayListener listener, - Key[] keys, - BatchRecord[] records, - BatchAttr attr - ) : base(cluster, true) - { - this.listener = listener; - this.records = records; - - // Create commands. 
- List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, false, this); - AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - tasks[count++] = new AsyncBatchTxnRollCommand(this, cluster, batchNode, policy, keys, records, attr); - } - this.commands = tasks; - } - - protected internal override void OnSuccess() - { - listener.OnSuccess(records, GetStatus()); - } - - protected internal override void OnFailure(AerospikeException ae) - { - listener.OnFailure(records, ae); - } - } - - sealed class AsyncBatchTxnRollCommand : AsyncBatchCommand - { - private Key[] keys; - private BatchRecord[] records; - private BatchAttr attr; - - public AsyncBatchTxnRollCommand( - AsyncBatchExecutor parent, - AsyncCluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - BatchRecord[] records, - BatchAttr attr - ) : base(parent, cluster, batch, batchPolicy, false) - { - this.keys = keys; - this.records = records; - this.attr = attr; - } - - public AsyncBatchTxnRollCommand(AsyncBatchTxnRollCommand other) : base(other) - { - this.keys = other.keys; - this.attr = other.attr; - this.records = other.records; - } - - protected internal override void WriteBuffer() - { - SetBatchTxnRoll(batchPolicy, keys, batch, attr); - } - - protected internal override void ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if (resultCode == ResultCode.OK) - { - record.resultCode = resultCode; - } - else - { - record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - parent.SetRowError(); - } - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncBatchTxnRollCommand(this); - } - - internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) - { - return new AsyncBatchTxnRollCommand(parent, cluster, batchNode, batchPolicy, keys, records, attr); - } - - internal override List 
GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, true, parent); - } - } - - //------------------------------------------------------- - // Batch Base Executor - //------------------------------------------------------- - - public abstract class AsyncBatchExecutor : IBatchStatus - { - private AerospikeException exception; - private int max; - private int count; - private readonly bool hasResultCode; - private bool error; - public AsyncBatchCommand[] commands; - public AsyncCluster cluster; - - public AsyncBatchExecutor(AsyncCluster cluster, bool hasResultCode) - { - this.hasResultCode = hasResultCode; - this.cluster = cluster; - cluster.AddCommandCount(); - } - - public void Execute() - { - Execute(commands); - } - - public void Execute(AsyncBatchCommand[] commands) - { - max = commands.Length; - - foreach (AsyncBatchCommand command in commands) - { - command.Execute(); - } - } - - public void Retry(AsyncMultiCommand[] commands) - { - lock (this) - { - // Adjust max for new commands minus failed command. - max += commands.Length - 1; - } - - foreach (AsyncBatchCommand command in commands.Cast()) - { - command.ExecuteBatchRetry(); - } - } - - public void ChildSuccess(AsyncNode node) - { - bool complete; - - lock (this) - { - complete = ++count == max; - } - - if (complete) - { - Finish(); - } - } - - public void ChildFailure(AerospikeException ae) - { - bool complete; - - lock (this) - { - if (exception == null) - { - exception = ae; - } - complete = ++count == max; - } - - if (complete) - { - Finish(); - } - } - - private void Finish() - { - if (exception == null) - { - OnSuccess(); - } - else - { - OnFailure(exception); - } - } - - public virtual void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) - { - // Only used in executors with sequence listeners. - // These executors will override this method. 
- } - - public void BatchKeyError(AerospikeException ae) - { - error = true; - - if (!hasResultCode) - { - // Legacy batch read commands that do not store a key specific resultCode. - // Store exception which will be passed to the listener on batch completion. - if (exception == null) - { - exception = ae; - } - } - } - - public void SetRowError() - { - // Indicate that a key specific error occurred. - error = true; - } - - public bool GetStatus() - { - return !error; - } - - protected internal abstract void OnSuccess(); - protected internal abstract void OnFailure(AerospikeException ae); - } - - //------------------------------------------------------- - // Batch Base Command - //------------------------------------------------------- - - public abstract class AsyncBatchCommand : AsyncMultiCommand - { - internal readonly AsyncBatchExecutor parent; - internal readonly BatchNode batch; - internal readonly BatchPolicy batchPolicy; - internal uint sequenceAP; - internal uint sequenceSC; - - public AsyncBatchCommand(AsyncBatchExecutor parent, AsyncCluster cluster, BatchNode batch, BatchPolicy batchPolicy, bool isOperation) - : base(cluster, batchPolicy, (AsyncNode)batch.node, isOperation) - { - this.parent = parent; - this.batch = batch; - this.batchPolicy = batchPolicy; - } - - public AsyncBatchCommand(AsyncBatchCommand other) : base(other) - { - this.parent = other.parent; - this.batch = other.batch; - this.batchPolicy = other.batchPolicy; - this.sequenceAP = other.sequenceAP; - this.sequenceSC = other.sequenceSC; - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.BATCH; - } - - protected void ParseFieldsRead(Key key) - { - if (policy.Txn != null) - { - long? version = ParseVersion(fieldCount); - policy.Txn.OnRead(key, version); - } - else - { - SkipKey(fieldCount); - } - } - - protected void ParseFields(Key key, bool hasWrite) - { - if (policy.Txn != null) - { - long? 
version = ParseVersion(fieldCount); - - if (hasWrite) - { - policy.Txn.OnWrite(key, version, resultCode); - } - else - { - policy.Txn.OnRead(key, version); - } - } - else - { - SkipKey(fieldCount); - } - } - - protected internal override bool PrepareRetry(bool timeout) - { - if (!(policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK)) - { - // Perform regular retry to same node. - return true; - } - - sequenceAP++; - - if (! timeout || policy.readModeSC != ReadModeSC.LINEARIZE) { - sequenceSC++; - } - return false; - } - - protected internal override bool RetryBatch() - { - List batchNodes = null; - - try - { - // Retry requires keys for this node to be split among other nodes. - // This can cause an exponential number of commands. - batchNodes = GenerateBatchNodes(); - - if (batchNodes.Count == 1 && batchNodes[0].node == batch.node) - { - // Batch node is the same. Go through normal retry. - // Normal retries reuse eventArgs, so PutBackArgsOnError() - // should not be called here. - return false; - } - - cluster.AddRetries(batchNodes.Count); - } - catch (Exception) - { - // Close original command. - base.ReleaseBuffer(); - throw; - } - - // Close original command. - base.ReleaseBuffer(); - - // Execute new commands. - AsyncBatchCommand[] cmds = new AsyncBatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - AsyncBatchCommand cmd = CreateCommand(batchNode); - cmd.sequenceAP = sequenceAP; - cmd.sequenceSC = sequenceSC; - cmd.SetBatchRetry(this); - cmds[count++] = cmd; - } - - // Retry new commands. - parent.Retry(cmds); - - // Return true so original batch command is stopped. - return true; - } - - protected internal override void OnSuccess() - { - parent.ChildSuccess(node); - } - - protected internal override void OnFailure(AerospikeException e) - { - SetInDoubt(e.InDoubt); - parent.ChildFailure(e); - } - - internal virtual void SetInDoubt(bool inDoubt) - { - // Do nothing by default. 
Batch writes will override this method. - } - - internal abstract AsyncBatchCommand CreateCommand(BatchNode batchNode); - internal abstract List GenerateBatchNodes(); - } - - internal class AsyncBatch - { - internal static void OnRecord(Cluster cluster, BatchRecordSequenceListener listener, BatchRecord record, int index) - { - try - { - listener.OnRecord(record, index); - } - catch (Exception e) - { - Log.Error(cluster.context, "Unexpected exception from OnRecord(): " + Util.GetErrorMessage(e)); - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + //------------------------------------------------------- + // ReadList + //------------------------------------------------------- + + public sealed class AsyncBatchReadListExecutor : AsyncBatchExecutor + { + private readonly BatchListListener listener; + private readonly List records; + + public AsyncBatchReadListExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchListListener listener, + List records + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchReadListCommand(this, cluster, batchNode, policy, records); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchReadListCommand : AsyncBatchCommand + { + private readonly List records; + + public AsyncBatchReadListCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + List records + ) : base(parent, cluster, batch, batchPolicy, true) + { + this.records = records; + } + + public AsyncBatchReadListCommand(AsyncBatchReadListCommand other) : base(other) + { + this.records = other.records; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + SetBatchOperate(batchPolicy, records, batch); + } + else + { + SetBatchRead(batchPolicy, records, batch); + } + } + + protected internal override void ParseRow() + { + BatchRead record = records[batchIndex]; + + ParseFieldsRead(record.key); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, false); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchReadListCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchReadListCommand(parent, cluster, batchNode, batchPolicy, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); + } + } + + //------------------------------------------------------- + // 
ReadSequence + //------------------------------------------------------- + + public sealed class AsyncBatchReadSequenceExecutor : AsyncBatchExecutor + { + private readonly BatchSequenceListener listener; + + public AsyncBatchReadSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchSequenceListener listener, + List records + ) : base(cluster, true) + { + this.listener = listener; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchReadSequenceCommand(this, cluster, batchNode, policy, listener, records); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchReadSequenceCommand : AsyncBatchCommand + { + private readonly BatchSequenceListener listener; + private readonly List records; + + public AsyncBatchReadSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + BatchSequenceListener listener, + List records + ) : base(parent, cluster, batch, batchPolicy, true) + { + this.listener = listener; + this.records = records; + } + + public AsyncBatchReadSequenceCommand(AsyncBatchReadSequenceCommand other) : base(other) + { + this.listener = other.listener; + this.records = other.records; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + SetBatchOperate(batchPolicy, records, batch); + } + else + { + SetBatchRead(batchPolicy, records, batch); + } + } + + protected internal override void ParseRow() + { + BatchRead record = records[batchIndex]; + + ParseFieldsRead(record.key); + + if (resultCode == 0) + { + 
record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, false); + } + listener.OnRecord(record); + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchReadSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchReadSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); + } + } + + //------------------------------------------------------- + // GetArray + //------------------------------------------------------- + + public sealed class AsyncBatchGetArrayExecutor : AsyncBatchExecutor + { + private readonly Key[] keys; + private readonly Record[] records; + private readonly RecordArrayListener listener; + + public AsyncBatchGetArrayExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + RecordArrayListener listener, + Key[] keys, + string[] binNames, + Operation[] ops, + int readAttr, + bool isOperation + ) : base(cluster, false) + { + this.keys = keys; + this.records = new Record[keys.Length]; + this.listener = listener; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchGetArrayCommand(this, cluster, batchNode, policy, keys, binNames, ops, records, readAttr, isOperation); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(keys, records); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(new AerospikeException.BatchRecords(records, ae)); + } + } + + sealed class AsyncBatchGetArrayCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly string[] binNames; + private readonly Operation[] ops; + private readonly Record[] records; + private readonly int readAttr; + + public AsyncBatchGetArrayCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string[] binNames, + Operation[] ops, + Record[] records, + int readAttr, + bool isOperation + ) : base(parent, cluster, batch, batchPolicy, isOperation) + { + this.keys = keys; + this.binNames = binNames; + this.ops = ops; + this.records = records; + this.readAttr = readAttr; + } + + public AsyncBatchGetArrayCommand(AsyncBatchGetArrayCommand other) : base(other) + { + this.keys = other.keys; + this.binNames = other.binNames; + this.ops = other.ops; + this.records = other.records; + this.readAttr = other.readAttr; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + BatchAttr attr = new(batchPolicy, readAttr, ops); + SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); + } + } + + protected internal override void ParseRow() + { + ParseFieldsRead(keys[batchIndex]); + + if (resultCode == 0) + { + 
records[batchIndex] = ParseRecord(); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchGetArrayCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchGetArrayCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + //------------------------------------------------------- + // GetSequence + //------------------------------------------------------- + + public sealed class AsyncBatchGetSequenceExecutor : AsyncBatchExecutor + { + private readonly RecordSequenceListener listener; + + public AsyncBatchGetSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + RecordSequenceListener listener, + Key[] keys, + string[] binNames, + Operation[] ops, + int readAttr, + bool isOperation + ) : base(cluster, false) + { + this.listener = listener; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchGetSequenceCommand(this, cluster, batchNode, policy, keys, binNames, ops, listener, readAttr, isOperation); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchGetSequenceCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly string[] binNames; + private readonly Operation[] ops; + private readonly RecordSequenceListener listener; + private readonly int readAttr; + + public AsyncBatchGetSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string[] binNames, + Operation[] ops, + RecordSequenceListener listener, + int readAttr, + bool isOperation + ) : base(parent, cluster, batch, batchPolicy, isOperation) + { + this.keys = keys; + this.binNames = binNames; + this.ops = ops; + this.listener = listener; + this.readAttr = readAttr; + } + + public AsyncBatchGetSequenceCommand(AsyncBatchGetSequenceCommand other) : base(other) + { + this.keys = other.keys; + this.binNames = other.binNames; + this.ops = other.ops; + this.listener = other.listener; + this.readAttr = other.readAttr; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + BatchAttr attr = new(batchPolicy, readAttr, ops); + SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); + } + } + + protected internal override void ParseRow() + { + Key keyOrig = keys[batchIndex]; + + ParseFieldsRead(keyOrig); + + if (resultCode 
== 0) + { + Record record = ParseRecord(); + listener.OnRecord(keyOrig, record); + } + else + { + listener.OnRecord(keyOrig, null); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchGetSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchGetSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, binNames, ops, listener, readAttr, isOperation); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + //------------------------------------------------------- + // ExistsArray + //------------------------------------------------------- + + public sealed class AsyncBatchExistsArrayExecutor : AsyncBatchExecutor + { + private readonly Key[] keys; + private readonly bool[] existsArray; + private readonly ExistsArrayListener listener; + + public AsyncBatchExistsArrayExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + Key[] keys, + ExistsArrayListener listener + ) : base(cluster,false) + { + this.keys = keys; + this.existsArray = new bool[keys.Length]; + this.listener = listener; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchExistsArrayCommand(this, cluster, batchNode, policy, keys, existsArray); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(keys, existsArray); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(new AerospikeException.BatchExists(existsArray, ae)); + } + } + + sealed class AsyncBatchExistsArrayCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly bool[] existsArray; + + public AsyncBatchExistsArrayCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + bool[] existsArray + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.existsArray = existsArray; + } + + public AsyncBatchExistsArrayCommand(AsyncBatchExistsArrayCommand other) : base(other) + { + this.keys = other.keys; + this.existsArray = other.existsArray; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + BatchAttr attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); + SetBatchOperate(batchPolicy, keys, batch, null, null, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); + } + } + + protected internal override void ParseRow() + { + ParseFieldsRead(keys[batchIndex]); + existsArray[batchIndex] = resultCode == 0; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchExistsArrayCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchExistsArrayCommand(parent, cluster, batchNode, 
batchPolicy, keys, existsArray); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + //------------------------------------------------------- + // ExistsSequence + //------------------------------------------------------- + + public sealed class AsyncBatchExistsSequenceExecutor : AsyncBatchExecutor + { + private readonly ExistsSequenceListener listener; + + public AsyncBatchExistsSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + Key[] keys, + ExistsSequenceListener listener + ) : base(cluster, false) + { + this.listener = listener; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, this); + AsyncBatchCommand[] commands = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new AsyncBatchExistsSequenceCommand(this, cluster, batchNode, policy, keys, listener); + } + this.commands = commands; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + + } + + sealed class AsyncBatchExistsSequenceCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly ExistsSequenceListener listener; + + public AsyncBatchExistsSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + ExistsSequenceListener listener + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.listener = listener; + } + + public AsyncBatchExistsSequenceCommand(AsyncBatchExistsSequenceCommand other) : base(other) + { + this.keys = other.keys; + this.listener = other.listener; + } + + protected internal override void WriteBuffer() + { + if (batch.node.HasBatchAny) + { + BatchAttr 
attr = new(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); + SetBatchOperate(batchPolicy, keys, batch, null, null, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); + } + } + + protected internal override void ParseRow() + { + Key keyOrig = keys[batchIndex]; + ParseFieldsRead(keyOrig); + listener.OnExists(keyOrig, resultCode == 0); + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchExistsSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchExistsSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, listener); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + //------------------------------------------------------- + // OperateList + //------------------------------------------------------- + + public sealed class AsyncBatchOperateListExecutor : AsyncBatchExecutor + { + internal readonly BatchOperateListListener listener; + internal readonly List records; + + public AsyncBatchOperateListExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchOperateListListener listener, + List records + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchOperateListCommand(this, cluster, batchNode, policy, records); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchOperateListCommand : AsyncBatchCommand + { + internal readonly List records; + + public AsyncBatchOperateListCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + List records + ) : base(parent, cluster, batch, batchPolicy, true) + { + this.records = records; + } + + public AsyncBatchOperateListCommand(AsyncBatchOperateListCommand other) : base(other) + { + this.records = other.records; + } + + protected internal override bool IsWrite() + { + // This method is only called to set inDoubt on node level errors. + // SetError() will filter out reads when setting record level inDoubt. + return true; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, records, batch); + } + + protected internal override void ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record.key, record.hasWrite); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + return; + } + + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. 
+ record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); + parent.SetRowError(); + return; + } + } + + record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); + parent.SetRowError(); + } + + internal override void SetInDoubt(bool inDoubt) + { + if (!inDoubt) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchOperateListCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchOperateListCommand(parent, cluster, batchNode, batchPolicy, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); + } + } + + //------------------------------------------------------- + // OperateSequence + //------------------------------------------------------- + + public sealed class AsyncBatchOperateSequenceExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordSequenceListener listener; + + public AsyncBatchOperateSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordSequenceListener listener, + List records + ) : base(cluster, true) + { + this.listener = listener; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, records, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchOperateSequenceCommand(this, cluster, batchNode, policy, listener, records); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchOperateSequenceCommand : AsyncBatchCommand + { + internal readonly BatchRecordSequenceListener listener; + internal readonly List records; + + public AsyncBatchOperateSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + BatchRecordSequenceListener listener, + List records + ) : base(parent, cluster, batch, batchPolicy, true) + { + this.listener = listener; + this.records = records; + } + + public AsyncBatchOperateSequenceCommand(AsyncBatchOperateSequenceCommand other) : base(other) + { + this.listener = other.listener; + this.records = other.records; + } + + protected internal override bool IsWrite() + { + // This method is only called to set inDoubt on node level errors. + // SetError() will filter out reads when setting record level inDoubt. + return true; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, records, batch); + } + + protected internal override void ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record.key, record.hasWrite); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + } + else if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. 
+ record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); + } + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); + } + AsyncBatch.OnRecord(cluster, listener, record, batchIndex); + } + + internal override void SetInDoubt(bool inDoubt) + { + if (!inDoubt) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + // Set inDoubt, but do not call OnRecord() because user already has access to full + // BatchRecord list and can examine each record for inDoubt when the exception occurs. + record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchOperateSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchOperateSequenceCommand(parent, cluster, batchNode, batchPolicy, listener, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, parent); + } + } + + //------------------------------------------------------- + // OperateRecordArray + //------------------------------------------------------- + + public sealed class AsyncBatchOperateRecordArrayExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + internal readonly BatchRecord[] records; + + public AsyncBatchOperateRecordArrayExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, + Key[] keys, + Operation[] ops, + BatchAttr attr + ) : base(cluster, true) + { + this.listener 
= listener; + this.records = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + this.records[i] = new BatchRecord(keys[i], attr.hasWrite); + } + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, attr.hasWrite, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchOperateRecordArrayCommand(this, cluster, batchNode, policy, keys, ops, records, attr); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(records, ae); + } + } + + sealed class AsyncBatchOperateRecordArrayCommand : AsyncBatchCommand + { + internal readonly Key[] keys; + internal readonly Operation[] ops; + internal readonly BatchRecord[] records; + internal readonly BatchAttr attr; + + public AsyncBatchOperateRecordArrayCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + Operation[] ops, + BatchRecord[] records, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, ops != null) + { + this.keys = keys; + this.ops = ops; + this.records = records; + this.attr = attr; + } + + public AsyncBatchOperateRecordArrayCommand(AsyncBatchOperateRecordArrayCommand other) : base(other) + { + this.keys = other.keys; + this.ops = other.ops; + this.records = other.records; + this.attr = other.attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, keys, batch, null, ops, attr); + } + + protected internal override void ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record.key, record.hasWrite); + + if (resultCode == 0) + { + 
record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + parent.SetRowError(); + } + } + + internal override void SetInDoubt(bool inDoubt) + { + if (!inDoubt || !attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + + policy.Txn?.OnWriteInDoubt(record.key); + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchOperateRecordArrayCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchOperateRecordArrayCommand(parent, cluster, batchNode, batchPolicy, keys, ops, records, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); + } + } + + //------------------------------------------------------- + // OperateRecordSequence + //------------------------------------------------------- + + public sealed class AsyncBatchOperateRecordSequenceExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordSequenceListener listener; + private readonly bool[] sent; + + public AsyncBatchOperateRecordSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordSequenceListener listener, + Key[] keys, + Operation[] ops, + BatchAttr attr + ) : base(cluster, true) + { + this.listener = listener; + this.sent = new bool[keys.Length]; + + // Create commands. 
+ List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchOperateRecordSequenceCommand(this, cluster, batchNode, policy, keys, ops, sent, listener, attr); + } + this.commands = tasks; + } + + public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) + { + BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite); + sent[index] = true; + AsyncBatch.OnRecord(cluster, listener, record, index); + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchOperateRecordSequenceCommand : AsyncBatchCommand + { + internal readonly Key[] keys; + internal readonly Operation[] ops; + internal readonly bool[] sent; + internal readonly BatchRecordSequenceListener listener; + internal readonly BatchAttr attr; + + public AsyncBatchOperateRecordSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + Operation[] ops, + bool[] sent, + BatchRecordSequenceListener listener, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, ops != null) + { + this.keys = keys; + this.ops = ops; + this.sent = sent; + this.listener = listener; + this.attr = attr; + } + + public AsyncBatchOperateRecordSequenceCommand(AsyncBatchOperateRecordSequenceCommand other) : base(other) + { + this.keys = other.keys; + this.ops = other.ops; + this.sent = other.sent; + this.listener = other.listener; + this.attr = other.attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, 
keys, batch, null, ops, attr); + } + + protected internal override void ParseRow() + { + Key keyOrig = keys[batchIndex]; + + ParseFields(keyOrig, attr.hasWrite); + + BatchRecord record; + + if (resultCode == 0) + { + record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite); + } + else + { + record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); + } + sent[batchIndex] = true; + AsyncBatch.OnRecord(cluster, listener, record, batchIndex); + } + + internal override void SetInDoubt(bool inDoubt) + { + // Set inDoubt for all unsent records, so the listener receives a full set of records. + foreach (int index in batch.offsets) + { + if (!sent[index]) + { + Key key = keys[index]; + BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); + sent[index] = true; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(key); + } + + AsyncBatch.OnRecord(cluster, listener, record, index); + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchOperateRecordSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchOperateRecordSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, ops, sent, listener, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); + } + } + + //------------------------------------------------------- + // UDFArray + //------------------------------------------------------- + + public sealed class AsyncBatchUDFArrayExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + internal readonly BatchRecord[] recordArray; + + public AsyncBatchUDFArrayExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, 
+ Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchAttr attr + ) : base(cluster, true) + { + this.listener = listener; + this.recordArray = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + this.recordArray[i] = new BatchRecord(keys[i], attr.hasWrite); + } + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, recordArray, attr.hasWrite, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchUDFArrayCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, recordArray, attr); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(recordArray, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(recordArray, ae); + } + } + + public sealed class AsyncBatchUDFArrayCommand : AsyncBatchCommand + { + internal readonly Key[] keys; + internal readonly string packageName; + internal readonly string functionName; + internal readonly byte[] argBytes; + internal readonly BatchRecord[] records; + internal readonly BatchAttr attr; + + public AsyncBatchUDFArrayCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchRecord[] records, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.packageName = packageName; + this.functionName = functionName; + this.argBytes = argBytes; + this.records = records; + this.attr = attr; + } + + public AsyncBatchUDFArrayCommand(AsyncBatchUDFArrayCommand other) : base(other) + { + this.keys = other.keys; + this.packageName = other.packageName; + this.functionName = other.functionName; + this.argBytes = 
other.argBytes; + this.records = other.records; + this.attr = other.attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); + } + + protected internal override void ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record.key, record.hasWrite); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + return; + } + + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. + record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter); + parent.SetRowError(); + return; + } + } + + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + parent.SetRowError(); + } + + internal override void SetInDoubt(bool inDoubt) + { + if (!inDoubt || !attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + + policy.Txn?.OnWriteInDoubt(record.key); + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchUDFArrayCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchUDFArrayCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); + } + } + + //------------------------------------------------------- + // UDFSequence + 
//------------------------------------------------------- + + public sealed class AsyncBatchUDFSequenceExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordSequenceListener listener; + private readonly bool[] sent; + + public AsyncBatchUDFSequenceExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordSequenceListener listener, + Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchAttr attr + ) : base(cluster, true) + { + this.listener = listener; + this.sent = new bool[keys.Length]; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, attr.hasWrite, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchUDFSequenceCommand(this, cluster, batchNode, policy, keys, packageName, functionName, argBytes, sent, listener, attr); + } + this.commands = tasks; + } + + public override void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) + { + BatchRecord record = new(key, null, ae.Result, inDoubt, hasWrite); + sent[index] = true; + AsyncBatch.OnRecord(cluster, listener, record, index); + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(ae); + } + } + + sealed class AsyncBatchUDFSequenceCommand : AsyncBatchCommand + { + internal readonly Key[] keys; + internal readonly string packageName; + internal readonly string functionName; + internal readonly byte[] argBytes; + internal readonly bool[] sent; + internal readonly BatchRecordSequenceListener listener; + internal readonly BatchAttr attr; + + public AsyncBatchUDFSequenceCommand + ( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string packageName, + string 
functionName, + byte[] argBytes, + bool[] sent, + BatchRecordSequenceListener listener, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.packageName = packageName; + this.functionName = functionName; + this.argBytes = argBytes; + this.sent = sent; + this.listener = listener; + this.attr = attr; + } + + public AsyncBatchUDFSequenceCommand(AsyncBatchUDFSequenceCommand other) : base(other) + { + this.keys = other.keys; + this.packageName = other.packageName; + this.functionName = other.functionName; + this.argBytes = other.argBytes; + this.sent = other.sent; + this.listener = other.listener; + this.attr = other.attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); + } + + protected internal override void ParseRow() + { + Key keyOrig = keys[batchIndex]; + + ParseFields(keyOrig, attr.hasWrite); + + BatchRecord record; + + if (resultCode == 0) + { + record = new BatchRecord(keyOrig, ParseRecord(), attr.hasWrite); + } + else if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. 
+ record = new BatchRecord(keyOrig, r, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); + } + else + { + record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); + } + } + else + { + record = new BatchRecord(keyOrig, null, resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); + } + sent[batchIndex] = true; + AsyncBatch.OnRecord(cluster, listener, record, batchIndex); + } + + internal override void SetInDoubt(bool inDoubt) + { + // Set inDoubt for all unsent records, so the listener receives a full set of records. + foreach (int index in batch.offsets) + { + if (!sent[index]) + { + Key key = keys[index]; + BatchRecord record = new(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); + sent[index] = true; + + if (record.inDoubt && policy.Txn != null) + { + policy.Txn.OnWriteInDoubt(record.key); + } + + AsyncBatch.OnRecord(cluster, listener, record, index); + } + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchUDFSequenceCommand(this); + } + + internal override AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchUDFSequenceCommand(parent, cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, sent, listener, attr); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sent, sequenceAP, sequenceSC, batch, attr.hasWrite, parent); + } + } + + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public sealed class AsyncBatchTxnVerifyExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + private readonly BatchRecord[] records; + + public AsyncBatchTxnVerifyExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, 
+ Key[] keys, + long?[] versions, + BatchRecord[] records + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, false, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchTxnVerifyCommand(this, cluster, batchNode, policy, keys, versions, records); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(records, ae); + } + } + + sealed class AsyncBatchTxnVerifyCommand : AsyncBatchCommand + { + private readonly Key[] keys; + private readonly long?[] versions; + private readonly BatchRecord[] records; + + public AsyncBatchTxnVerifyCommand( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + long?[] versions, + BatchRecord[] records + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.keys = keys; + this.versions = versions; + this.records = records; + } + + public AsyncBatchTxnVerifyCommand(AsyncBatchTxnVerifyCommand other) : base(other) + { + this.keys = other.keys; + this.versions = other.versions; + this.records = other.records; + } + + protected internal override void WriteBuffer() + { + SetBatchTxnVerify(batchPolicy, keys, versions, batch); + } + + protected internal override void ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == ResultCode.OK) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, false); + parent.SetRowError(); + } + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncBatchTxnVerifyCommand(this); + } + + internal override 
AsyncBatchCommand CreateCommand(BatchNode batchNode) + { + return new AsyncBatchTxnVerifyCommand(parent, cluster, batchNode, batchPolicy, keys, versions, records); + } + + internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + public sealed class AsyncBatchTxnRollExecutor : AsyncBatchExecutor + { + internal readonly BatchRecordArrayListener listener; + private readonly BatchRecord[] records; + + public AsyncBatchTxnRollExecutor + ( + AsyncCluster cluster, + BatchPolicy policy, + BatchRecordArrayListener listener, + Txn txn, + Key[] keys, + BatchRecord[] records, + BatchAttr attr + ) : base(cluster, true) + { + this.listener = listener; + this.records = records; + + // Create commands. + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, false, this); + AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + tasks[count++] = new AsyncBatchTxnRollCommand(this, cluster, batchNode, policy, txn, keys, records, attr); + } + this.commands = tasks; + } + + protected internal override void OnSuccess() + { + listener.OnSuccess(records, GetStatus()); + } + + protected internal override void OnFailure(AerospikeException ae) + { + listener.OnFailure(records, ae); + } + } + + sealed class AsyncBatchTxnRollCommand : AsyncBatchCommand + { + private readonly Txn txn; + private readonly Key[] keys; + private readonly BatchRecord[] records; + private readonly BatchAttr attr; + + public AsyncBatchTxnRollCommand( + AsyncBatchExecutor parent, + AsyncCluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Txn txn, + Key[] keys, + BatchRecord[] records, + BatchAttr attr + ) : base(parent, cluster, batch, batchPolicy, false) + { + this.txn = txn; + this.keys = keys; + this.records = records; + this.attr = attr; + } + + public 
AsyncBatchTxnRollCommand(AsyncBatchTxnRollCommand other) : base(other)
+		{
+			// Copy every field, including txn. CloneCommand()/retry uses this
+			// constructor, and the clone's WriteBuffer() and CreateCommand()
+			// both read txn — omitting it would send a roll request with a
+			// null transaction on retry.
+			this.txn = other.txn;
+			this.keys = other.keys;
+			this.attr = other.attr;
+			this.records = other.records;
+		}
+
+		protected internal override void WriteBuffer()
+		{
+			SetBatchTxnRoll(batchPolicy, txn, keys, batch, attr);
+		}
+
+		protected internal override void ParseRow()
+		{
+			SkipKey(fieldCount);
+
+			BatchRecord record = records[batchIndex];
+
+			if (resultCode == ResultCode.OK)
+			{
+				record.resultCode = resultCode;
+			}
+			else
+			{
+				record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter));
+				parent.SetRowError();
+			}
+		}
+
+		protected internal override AsyncCommand CloneCommand()
+		{
+			return new AsyncBatchTxnRollCommand(this);
+		}
+
+		internal override AsyncBatchCommand CreateCommand(BatchNode batchNode)
+		{
+			return new AsyncBatchTxnRollCommand(parent, cluster, batchNode, batchPolicy, txn, keys, records, attr);
+		}
+
+		internal override List GenerateBatchNodes()
+		{
+			return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, true, parent);
+		}
+	}
+
+	//-------------------------------------------------------
+	// Batch Base Executor
+	//-------------------------------------------------------
+
+	public abstract class AsyncBatchExecutor : IBatchStatus
+	{
+		private AerospikeException exception;
+		private int max;
+		private int count;
+		private readonly bool hasResultCode;
+		private bool error;
+		public AsyncBatchCommand[] commands;
+		public AsyncCluster cluster;
+
+		public AsyncBatchExecutor(AsyncCluster cluster, bool hasResultCode)
+		{
+			this.hasResultCode = hasResultCode;
+			this.cluster = cluster;
+			cluster.AddCommandCount();
+		}
+
+		public void Execute()
+		{
+			Execute(commands);
+		}
+
+		public void Execute(AsyncBatchCommand[] commands)
+		{
+			max = commands.Length;
+
+			foreach (AsyncBatchCommand command in commands)
+			{
+				command.Execute();
+			}
+		}
+
+		public void Retry(AsyncMultiCommand[] commands)
+		{
+			lock (this)
+			{
+				// Adjust max for new
commands minus failed command. + max += commands.Length - 1; + } + + foreach (AsyncBatchCommand command in commands.Cast()) + { + command.ExecuteBatchRetry(); + } + } + + public void ChildSuccess(AsyncNode node) + { + bool complete; + + lock (this) + { + complete = ++count == max; + } + + if (complete) + { + Finish(); + } + } + + public void ChildFailure(AerospikeException ae) + { + bool complete; + + lock (this) + { + if (exception == null) + { + exception = ae; + } + complete = ++count == max; + } + + if (complete) + { + Finish(); + } + } + + private void Finish() + { + if (exception == null) + { + OnSuccess(); + } + else + { + OnFailure(exception); + } + } + + public virtual void BatchKeyError(Cluster cluster, Key key, int index, AerospikeException ae, bool inDoubt, bool hasWrite) + { + // Only used in executors with sequence listeners. + // These executors will override this method. + } + + public void BatchKeyError(AerospikeException ae) + { + error = true; + + if (!hasResultCode) + { + // Legacy batch read commands that do not store a key specific resultCode. + // Store exception which will be passed to the listener on batch completion. + if (exception == null) + { + exception = ae; + } + } + } + + public void SetRowError() + { + // Indicate that a key specific error occurred. 
+ error = true; + } + + public bool GetStatus() + { + return !error; + } + + protected internal abstract void OnSuccess(); + protected internal abstract void OnFailure(AerospikeException ae); + } + + //------------------------------------------------------- + // Batch Base Command + //------------------------------------------------------- + + public abstract class AsyncBatchCommand : AsyncMultiCommand + { + internal readonly AsyncBatchExecutor parent; + internal readonly BatchNode batch; + internal readonly BatchPolicy batchPolicy; + internal uint sequenceAP; + internal uint sequenceSC; + + public AsyncBatchCommand(AsyncBatchExecutor parent, AsyncCluster cluster, BatchNode batch, BatchPolicy batchPolicy, bool isOperation) + : base(cluster, batchPolicy, (AsyncNode)batch.node, isOperation) + { + this.parent = parent; + this.batch = batch; + this.batchPolicy = batchPolicy; + } + + public AsyncBatchCommand(AsyncBatchCommand other) : base(other) + { + this.parent = other.parent; + this.batch = other.batch; + this.batchPolicy = other.batchPolicy; + this.sequenceAP = other.sequenceAP; + this.sequenceSC = other.sequenceSC; + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.BATCH; + } + + protected void ParseFieldsRead(Key key) + { + if (policy.Txn != null) + { + long? version = ParseVersion(fieldCount); + policy.Txn.OnRead(key, version); + } + else + { + SkipKey(fieldCount); + } + } + + protected void ParseFields(Key key, bool hasWrite) + { + if (policy.Txn != null) + { + long? version = ParseVersion(fieldCount); + + if (hasWrite) + { + policy.Txn.OnWrite(key, version, resultCode); + } + else + { + policy.Txn.OnRead(key, version); + } + } + else + { + SkipKey(fieldCount); + } + } + + protected internal override bool PrepareRetry(bool timeout) + { + if (!(policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK)) + { + // Perform regular retry to same node. + return true; + } + + sequenceAP++; + + if (! 
timeout || policy.readModeSC != ReadModeSC.LINEARIZE) { + sequenceSC++; + } + return false; + } + + protected internal override bool RetryBatch() + { + List batchNodes = null; + + try + { + // Retry requires keys for this node to be split among other nodes. + // This can cause an exponential number of commands. + batchNodes = GenerateBatchNodes(); + + if (batchNodes.Count == 1 && batchNodes[0].node == batch.node) + { + // Batch node is the same. Go through normal retry. + // Normal retries reuse eventArgs, so PutBackArgsOnError() + // should not be called here. + return false; + } + + cluster.AddRetries(batchNodes.Count); + } + catch (Exception) + { + // Close original command. + base.ReleaseBuffer(); + throw; + } + + // Close original command. + base.ReleaseBuffer(); + + // Execute new commands. + AsyncBatchCommand[] cmds = new AsyncBatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + AsyncBatchCommand cmd = CreateCommand(batchNode); + cmd.sequenceAP = sequenceAP; + cmd.sequenceSC = sequenceSC; + cmd.SetBatchRetry(this); + cmds[count++] = cmd; + } + + // Retry new commands. + parent.Retry(cmds); + + // Return true so original batch command is stopped. + return true; + } + + protected internal override void OnSuccess() + { + parent.ChildSuccess(node); + } + + protected internal override void OnFailure(AerospikeException e) + { + SetInDoubt(e.InDoubt); + parent.ChildFailure(e); + } + + internal virtual void SetInDoubt(bool inDoubt) + { + // Do nothing by default. Batch writes will override this method. 
+ } + + internal abstract AsyncBatchCommand CreateCommand(BatchNode batchNode); + internal abstract List GenerateBatchNodes(); + } + + internal class AsyncBatch + { + internal static void OnRecord(Cluster cluster, BatchRecordSequenceListener listener, BatchRecord record, int index) + { + try + { + listener.OnRecord(record, index); + } + catch (Exception e) + { + Log.Error(cluster.context, "Unexpected exception from OnRecord(): " + Util.GetErrorMessage(e)); + } + } + } +} diff --git a/AerospikeClient/Async/AsyncDelete.cs b/AerospikeClient/Async/AsyncDelete.cs index 2cdb45f1..cafbd036 100644 --- a/AerospikeClient/Async/AsyncDelete.cs +++ b/AerospikeClient/Async/AsyncDelete.cs @@ -1,92 +1,93 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public sealed class AsyncDelete : AsyncWriteBase - { - private readonly DeleteListener listener; - private bool existed; - - public AsyncDelete(AsyncCluster cluster, WritePolicy writePolicy, Key key, DeleteListener listener) - : base(cluster, writePolicy, key) - { - this.listener = listener; - } - - public AsyncDelete(AsyncDelete other) - : base(other) - { - this.listener = other.listener; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncDelete(this); - } - - protected internal override void WriteBuffer() - { - SetDelete(writePolicy, Key); - } - - protected internal override bool ParseResult() - { - ParseHeader(); - - if (resultCode == ResultCode.OK) - { - existed = true; - return true; - } - - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) - { - existed = false; - return true; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (policy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - existed = true; - return true; - } - - throw new AerospikeException(resultCode); - } - - protected internal override void OnSuccess() - { - if (listener != null) - { - listener.OnSuccess(Key, existed); - } - } - - protected internal override void OnFailure(AerospikeException e) - { - if (listener != null) - { - listener.OnFailure(e); - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class AsyncDelete : AsyncWriteBase + { + private readonly DeleteListener listener; + private bool existed; + + public AsyncDelete(AsyncCluster cluster, WritePolicy writePolicy, Key key, DeleteListener listener) + : base(cluster, writePolicy, key) + { + this.listener = listener; + } + + public AsyncDelete(AsyncDelete other) + : base(other) + { + this.listener = other.listener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncDelete(this); + } + + protected internal override void WriteBuffer() + { + SetDelete(writePolicy, Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + if (resultCode == ResultCode.OK) + { + existed = true; + return true; + } + + if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + existed = false; + return true; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + existed = true; + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key, existed); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} diff --git a/AerospikeClient/Async/AsyncMultiCommand.cs b/AerospikeClient/Async/AsyncMultiCommand.cs index a520d9f5..3af3225b 100644 --- a/AerospikeClient/Async/AsyncMultiCommand.cs +++ b/AerospikeClient/Async/AsyncMultiCommand.cs @@ -1,142 +1,137 @@ -/* - * Copyright 2012-2022 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -using System.Collections.Generic; - -namespace Aerospike.Client -{ - public abstract class AsyncMultiCommand : AsyncCommand - { - protected internal readonly AsyncNode serverNode; - protected internal int info3; - protected internal int resultCode; - protected internal int generation; - protected internal int expiration; - protected internal int batchIndex; - protected internal int fieldCount; - protected internal int opCount; - protected internal readonly bool isOperation; - protected internal volatile bool valid = true; - - /// - /// Batch constructor. - /// - public AsyncMultiCommand(AsyncCluster cluster, Policy policy, AsyncNode node, bool isOperation) - : base(cluster, policy) - { - this.serverNode = node; - this.isOperation = isOperation; - } - - /// - /// Scan/Query constructor. - /// - public AsyncMultiCommand(AsyncCluster cluster, Policy policy, AsyncNode node, int socketTimeout, int totalTimeout) - : base(cluster, policy, socketTimeout, totalTimeout) - { - this.serverNode = node; - this.isOperation = false; - } - - public AsyncMultiCommand(AsyncMultiCommand other) : base(other) - { - this.serverNode = other.serverNode; - this.isOperation = other.isOperation; - } - - protected internal sealed override void ParseCommand() - { - if (!valid) - { - throw new AerospikeException.QueryTerminated(); - } - - if (ParseGroup()) - { - Finish(); - return; - } - - // Prepare for next group. 
- ReceiveNext(); - } - - protected internal override Node GetNode(Cluster cluster) - { - return serverNode; - } - - protected internal override bool PrepareRetry(bool timeout) - { - return true; - } - - private bool ParseGroup() - { - // Parse each message response and add it to the result array - while (dataOffset < dataLength) - { - dataOffset += 3; - info3 = dataBuffer[dataOffset]; - dataOffset += 2; - resultCode = dataBuffer[dataOffset]; - - // If this is the end marker of the response, do not proceed further. - if ((info3 & Command.INFO3_LAST) != 0) - { - if (resultCode != 0) - { - // The server returned a fatal error. - throw new AerospikeException(resultCode); - } - return true; - } - - dataOffset++; - generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - batchIndex = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - - ParseRow(); - } - return false; - } - - protected internal abstract void ParseRow(); - - protected internal Record ParseRecord() - { - if (opCount <= 0) - { - return new Record(null, generation, expiration); - } - - return policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); - } - - protected internal void Stop() - { - valid = false; - } - } -} +/* + * Copyright 2012-2022 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using System.Collections.Generic; + +namespace Aerospike.Client +{ + public abstract class AsyncMultiCommand : AsyncCommand + { + protected internal readonly AsyncNode serverNode; + protected internal int info3; + protected internal int batchIndex; + protected internal readonly bool isOperation; + protected internal volatile bool valid = true; + + /// + /// Batch constructor. + /// + public AsyncMultiCommand(AsyncCluster cluster, Policy policy, AsyncNode node, bool isOperation) + : base(cluster, policy) + { + this.serverNode = node; + this.isOperation = isOperation; + } + + /// + /// Scan/Query constructor. + /// + public AsyncMultiCommand(AsyncCluster cluster, Policy policy, AsyncNode node, int socketTimeout, int totalTimeout) + : base(cluster, policy, socketTimeout, totalTimeout) + { + this.serverNode = node; + this.isOperation = false; + } + + public AsyncMultiCommand(AsyncMultiCommand other) : base(other) + { + this.serverNode = other.serverNode; + this.isOperation = other.isOperation; + } + + protected internal sealed override void ParseCommand() + { + if (!valid) + { + throw new AerospikeException.QueryTerminated(); + } + + if (ParseGroup()) + { + Finish(); + return; + } + + // Prepare for next group. 
+ ReceiveNext(); + } + + protected internal override Node GetNode(Cluster cluster) + { + return serverNode; + } + + protected internal override bool PrepareRetry(bool timeout) + { + return true; + } + + private bool ParseGroup() + { + // Parse each message response and add it to the result array + while (dataOffset < dataLength) + { + dataOffset += 3; + info3 = dataBuffer[dataOffset]; + dataOffset += 2; + resultCode = dataBuffer[dataOffset]; + + // If this is the end marker of the response, do not proceed further. + if ((info3 & Command.INFO3_LAST) != 0) + { + if (resultCode != 0) + { + // The server returned a fatal error. + throw new AerospikeException(resultCode); + } + return true; + } + + dataOffset++; + generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + batchIndex = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + + ParseRow(); + } + return false; + } + + protected internal abstract void ParseRow(); + + protected internal Record ParseRecord() + { + if (opCount <= 0) + { + return new Record(null, generation, expiration); + } + + return policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); + } + + protected internal void Stop() + { + valid = false; + } + } +} diff --git a/AerospikeClient/Async/AsyncSingleCommand.cs b/AerospikeClient/Async/AsyncSingleCommand.cs index a1847403..696b21eb 100644 --- a/AerospikeClient/Async/AsyncSingleCommand.cs +++ b/AerospikeClient/Async/AsyncSingleCommand.cs @@ -1,128 +1,51 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -namespace Aerospike.Client -{ - public abstract class AsyncSingleCommand : AsyncCommand - { - protected int resultCode; - protected int generation; - protected int expiration; - protected int fieldCount; - protected int opCount; - - public AsyncSingleCommand(AsyncCluster cluster, Policy policy) - : base(cluster, policy) - { - } - - public AsyncSingleCommand(AsyncSingleCommand other) - : base(other) - { - } - - protected internal sealed override void ParseCommand() - { - ParseResult(); - Finish(); - } - - protected void ParseHeader() - { - resultCode = dataBuffer[dataOffset + 5]; - generation = ByteUtil.BytesToInt(dataBuffer, dataOffset + 6); - expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset + 10); - fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 18); - opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 20); - dataOffset += Command.MSG_REMAINING_HEADER_SIZE; - } - - protected void ParseFields(Txn txn, Key key, bool hasWrite) - { - if (txn == null) - { - SkipFields(fieldCount); - return; - } - - long? 
version = null; - - for (int i = 0; i < fieldCount; i++) - { - int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - - int type = dataBuffer[dataOffset++]; - int size = len - 1; - - if (type == FieldType.RECORD_VERSION) - { - if (size == 7) - { - version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); - } - else - { - throw new AerospikeException("Record version field has invalid size: " + size); - } - } - dataOffset += size; - } - - if (hasWrite) - { - txn.OnWrite(key, version, resultCode); - } - else - { - txn.OnRead(key, version); - } - } - - protected void SkipFields(int fieldCount) - { - // There can be fields in the response (setname etc). - // But for now, ignore them. Expose them to the API if needed in the future. - for (int i = 0; i < fieldCount; i++) - { - int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4 + fieldlen; - } - } - - protected void ParseTxnDeadline(Txn txn) - { - for (int i = 0; i < fieldCount; i++) - { - int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - - int type = dataBuffer[dataOffset++]; - int size = len - 1; - - if (type == FieldType.MRT_DEADLINE) - { - int deadline = ByteUtil.LittleBytesToInt(dataBuffer, dataOffset); - txn.Deadline = deadline; - } - dataOffset += size; - } - } - - protected internal abstract bool ParseResult(); - } -} +/* + * Copyright 2012-2023 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public abstract class AsyncSingleCommand : AsyncCommand + { + public AsyncSingleCommand(AsyncCluster cluster, Policy policy) + : base(cluster, policy) + { + } + + public AsyncSingleCommand(AsyncSingleCommand other) + : base(other) + { + } + + protected internal sealed override void ParseCommand() + { + ParseResult(); + Finish(); + } + + protected void ParseHeader() + { + resultCode = dataBuffer[dataOffset + 5]; + generation = ByteUtil.BytesToInt(dataBuffer, dataOffset + 6); + expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset + 10); + fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 18); + opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset + 20); + dataOffset += Command.MSG_REMAINING_HEADER_SIZE; + } + + protected internal abstract bool ParseResult(); + } +} + \ No newline at end of file diff --git a/AerospikeClient/Async/AsyncTouch.cs b/AerospikeClient/Async/AsyncTouch.cs index bd998ff3..9e8ed5bf 100644 --- a/AerospikeClient/Async/AsyncTouch.cs +++ b/AerospikeClient/Async/AsyncTouch.cs @@ -1,83 +1,84 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public sealed class AsyncTouch : AsyncWriteBase - { - private readonly WriteListener listener; - - public AsyncTouch(AsyncCluster cluster, WritePolicy writePolicy, WriteListener listener, Key key) - : base(cluster, writePolicy, key) - { - this.listener = listener; - } - - public AsyncTouch(AsyncTouch other) - : base(other) - { - this.listener = other.listener; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncTouch(this); - } - - protected internal override void WriteBuffer() - { - SetTouch(writePolicy, Key); - } - - protected internal override bool ParseResult() - { - ParseHeader(); - - if (resultCode == ResultCode.OK) - { - return true; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (policy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - return true; - } - - throw new AerospikeException(resultCode); - } - - protected internal override void OnSuccess() - { - if (listener != null) - { - listener.OnSuccess(Key); - } - } - - protected internal override void OnFailure(AerospikeException e) - { - if (listener != null) - { - listener.OnFailure(e); - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class AsyncTouch : AsyncWriteBase + { + private readonly WriteListener listener; + + public AsyncTouch(AsyncCluster cluster, WritePolicy writePolicy, WriteListener listener, Key key) + : base(cluster, writePolicy, key) + { + this.listener = listener; + } + + public AsyncTouch(AsyncTouch other) + : base(other) + { + this.listener = other.listener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncTouch(this); + } + + protected internal override void WriteBuffer() + { + SetTouch(writePolicy, Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + if (resultCode == ResultCode.OK) + { + return true; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} diff --git a/AerospikeClient/Async/AsyncTxnClose.cs b/AerospikeClient/Async/AsyncTxnClose.cs index e982e9a0..03c2376c 100644 --- a/AerospikeClient/Async/AsyncTxnClose.cs +++ b/AerospikeClient/Async/AsyncTxnClose.cs @@ -1,88 +1,89 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -namespace Aerospike.Client -{ - public sealed class AsyncTxnClose : AsyncWriteBase - { - private readonly Txn txn; - private readonly DeleteListener listener; - - public AsyncTxnClose - ( - AsyncCluster cluster, - Txn txn, - DeleteListener listener, - WritePolicy writePolicy, - Key key - ) : base(cluster, writePolicy, key) - { - this.txn = txn; - this.listener = listener; - } - - public AsyncTxnClose(AsyncTxnClose other) - : base(other) - { - this.txn = other.txn; - this.listener = other.listener; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncTxnClose(this); - } - - protected internal override void WriteBuffer() - { - SetTxnClose(txn, Key); - } - - protected internal override bool ParseResult() - { - ParseHeader(); - - if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) - { - return true; - } - - throw new AerospikeException(resultCode); - } - - protected internal override void OnInDoubt() - { - } - - protected internal override void OnSuccess() - { - if (listener != null) - { - listener.OnSuccess(Key, true); - } - } - - protected internal override void OnFailure(AerospikeException e) - { - if (listener != null) - { - listener.OnFailure(e); - } - } - } -} - +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class AsyncTxnClose : AsyncWriteBase + { + private readonly Txn txn; + private readonly DeleteListener listener; + + public AsyncTxnClose + ( + AsyncCluster cluster, + Txn txn, + DeleteListener listener, + WritePolicy writePolicy, + Key key + ) : base(cluster, writePolicy, key) + { + this.txn = txn; + this.listener = listener; + } + + public AsyncTxnClose(AsyncTxnClose other) + : base(other) + { + this.txn = other.txn; + this.listener = other.listener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncTxnClose(this); + } + + protected internal override void WriteBuffer() + { + SetTxnClose(txn, Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key, true); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs index c7ca4f46..f4466307 100644 --- a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs +++ b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs @@ -1,90 +1,87 @@ -/* - * Copyright 
2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -namespace Aerospike.Client -{ - public sealed class AsyncTxnMarkRollForward : AsyncWriteBase - { - private readonly Txn txn; - private readonly WriteListener listener; - - public AsyncTxnMarkRollForward - ( - AsyncCluster cluster, - Txn txn, - WriteListener listener, - WritePolicy writePolicy, - Key key - ) : base(cluster, writePolicy, key) - { - this.txn = txn; - this.listener = listener; - } - - public AsyncTxnMarkRollForward(AsyncTxnMarkRollForward other) - : base(other) - { - this.txn = other.txn; - this.listener = other.listener; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncTxnMarkRollForward(this); - } - - protected internal override void WriteBuffer() - { - SetTxnMarkRollForward(txn, Key); - } - - protected internal override bool ParseResult() - { - ParseHeader(); - - // BIN_EXISTS_ERROR is considered a success because it means a previous attempt already - // succeeded in notifying the server that the MRT will be rolled forward. 
- if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) - { - return true; - } - - throw new AerospikeException(resultCode); - } - - protected internal override void OnInDoubt() - { - } - - protected internal override void OnSuccess() - { - if (listener != null) - { - listener.OnSuccess(Key); - } - } - - protected internal override void OnFailure(AerospikeException e) - { - if (listener != null) - { - listener.OnFailure(e); - } - } - } -} - +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class AsyncTxnMarkRollForward : AsyncWriteBase + { + private readonly WriteListener listener; + + public AsyncTxnMarkRollForward + ( + AsyncCluster cluster, + WriteListener listener, + WritePolicy writePolicy, + Key key + ) : base(cluster, writePolicy, key) + { + this.listener = listener; + } + + public AsyncTxnMarkRollForward(AsyncTxnMarkRollForward other) + : base(other) + { + this.listener = other.listener; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncTxnMarkRollForward(this); + } + + protected internal override void WriteBuffer() + { + SetTxnMarkRollForward(Key); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + // BIN_EXISTS_ERROR is considered a success because it means a previous attempt already + // succeeded in notifying the server that the MRT will be rolled forward. + if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) + { + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncTxnMonitor.cs b/AerospikeClient/Async/AsyncTxnMonitor.cs index fea0e19a..566e9800 100644 --- a/AerospikeClient/Async/AsyncTxnMonitor.cs +++ b/AerospikeClient/Async/AsyncTxnMonitor.cs @@ -1,207 +1,201 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -using Aerospike.Client; -using System.Diagnostics.Metrics; -using System.Diagnostics; - -namespace Aerospike.Client -{ - public abstract class AsyncTxnMonitor - { - public static void Execute(AsyncCluster cluster, WritePolicy policy, AsyncWriteBase command) - { - if (policy.Txn == null) - { - // Command is not run under a MRT monitor. Run original command. - command.Execute(); - return; - } - - Txn txn = policy.Txn; - Key cmdKey = command.Key; - - if (txn.Writes.Contains(cmdKey)) - { - // MRT monitor already contains this key. Run original command. - command.Execute(); - return; - } - - // Add key to MRT monitor and then run original command. - Operation[] ops = TxnMonitor.GetTxnOps(txn, cmdKey); - SingleTxnMonitor stm = new(cluster, command); - stm.Execute(cluster, policy, ops); - } - - public static void ExecuteBatch( - BatchPolicy policy, - AsyncBatchExecutor executor, - Key[] keys - ) - { - if (policy.Txn == null) - { - // Command is not run under a MRT monitor. Run original command. - executor.Execute(executor.commands); - return; - } - - // Add write keys to MRT monitor and then run original command. - Operation[] ops = TxnMonitor.GetTxnOps(policy.Txn, keys); - BatchTxnMonitor ate = new(executor); - ate.Execute(executor.cluster, policy, ops); - } - - public static void ExecuteBatch( - BatchPolicy policy, - AsyncBatchExecutor executor, - List records - ) - { - if (policy.Txn == null) - { - // Command is not run under a MRT monitor. Run original command. 
- executor.Execute(); - return; - } - - // Add write keys to MRT monitor and then run original command. - Operation[] ops = TxnMonitor.GetTxnOps(policy.Txn, records); - - if (ops == null) - { - // Readonly batch does not need to add key digests. Run original command. - executor.Execute(); - return; - } - - BatchTxnMonitor ate = new(executor); - ate.Execute(executor.cluster, policy, ops); - } - - public sealed class SingleTxnMonitor : AsyncTxnMonitor - { - public SingleTxnMonitor(AsyncCluster cluster, AsyncWriteBase command) - : base(command, cluster) - { - } - - public override void RunCommand() - { - command.Execute(); - } - - public override void OnFailure(AerospikeException ae) - { - command.OnFailure(ae); - } - } - - public sealed class BatchTxnMonitor : AsyncTxnMonitor - { - private readonly AsyncBatchExecutor executor; - private readonly AsyncBatchCommand[] commands; - - public BatchTxnMonitor(AsyncBatchExecutor executor) - : base(null, null) - { - this.executor = executor; - this.commands = executor.commands; - } - - public override void RunCommand() - { - executor.Execute(commands); - } - - public override void OnFailure(AerospikeException ae) - { - executor.OnFailure(ae); - } - } - - readonly AsyncCommand command; - readonly AsyncCluster cluster; - - private AsyncTxnMonitor(AsyncCommand command, AsyncCluster cluster) - { - this.command = command; - this.cluster = cluster; - } - - void Execute(AsyncCluster cluster, Policy policy, Operation[] ops) - { - Key txnKey = TxnMonitor.GetTxnMonitorKey(policy.Txn); - WritePolicy wp = TxnMonitor.CopyTimeoutPolicy(policy); - - ExecuteRecordListener txnListener = new(this); - - // Add write key(s) to MRT monitor. 
- OperateArgs args = new(wp, null, null, ops); - AsyncTxnAddKeys txnCommand = new(cluster, txnListener, txnKey, args); - txnCommand.Execute(); - } - - private void NotifyFailure(AerospikeException ae) - { - try - { - OnFailure(ae); - } - catch (Exception t) - { - Log.Error("notifyCommandFailure onFailure() failed: " + t.StackTrace); - } - } - - public abstract void OnFailure(AerospikeException ae); - public abstract void RunCommand(); - - private sealed class ExecuteRecordListener : RecordListener - { - private readonly AsyncTxnMonitor monitor; - - public ExecuteRecordListener(AsyncTxnMonitor monitor) - { - this.monitor = monitor; - } - - public void OnSuccess(Key key, Record record) - { - try - { - // Run original command. - monitor.RunCommand(); - } - catch (AerospikeException ae) - { - monitor.NotifyFailure(ae); - } - catch (Exception t) - { - monitor.NotifyFailure(new AerospikeException(t)); - } - } - - public void OnFailure(AerospikeException ae) - { - monitor.NotifyFailure(new AerospikeException(ResultCode.TXN_FAILED, "Failed to add key(s) to MRT monitor", ae)); - } - } - } -} - +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +namespace Aerospike.Client +{ + public abstract class AsyncTxnMonitor + { + public static void Execute(AsyncCluster cluster, WritePolicy policy, AsyncWriteBase command) + { + if (policy.Txn == null) + { + // Command is not run under a MRT monitor. Run original command. + command.Execute(); + return; + } + + Txn txn = policy.Txn; + Key cmdKey = command.Key; + + if (txn.Writes.Contains(cmdKey)) + { + // MRT monitor already contains this key. Run original command. + command.Execute(); + return; + } + + // Add key to MRT monitor and then run original command. + Operation[] ops = TxnMonitor.GetTxnOps(txn, cmdKey); + SingleTxnMonitor stm = new(cluster, command); + stm.Execute(cluster, policy, ops); + } + + public static void ExecuteBatch( + BatchPolicy policy, + AsyncBatchExecutor executor, + Key[] keys + ) + { + if (policy.Txn == null) + { + // Command is not run under a MRT monitor. Run original command. + executor.Execute(executor.commands); + return; + } + + // Add write keys to MRT monitor and then run original command. + Operation[] ops = TxnMonitor.GetTxnOps(policy.Txn, keys); + BatchTxnMonitor ate = new(executor); + ate.Execute(executor.cluster, policy, ops); + } + + public static void ExecuteBatch( + BatchPolicy policy, + AsyncBatchExecutor executor, + List records + ) + { + if (policy.Txn == null) + { + // Command is not run under a MRT monitor. Run original command. + executor.Execute(); + return; + } + + // Add write keys to MRT monitor and then run original command. + Operation[] ops = TxnMonitor.GetTxnOps(policy.Txn, records); + + if (ops == null) + { + // Readonly batch does not need to add key digests. Run original command. 
+ executor.Execute(); + return; + } + + BatchTxnMonitor ate = new(executor); + ate.Execute(executor.cluster, policy, ops); + } + + public sealed class SingleTxnMonitor : AsyncTxnMonitor + { + public SingleTxnMonitor(AsyncCluster cluster, AsyncWriteBase command) + : base(command, cluster) + { + } + + public override void RunCommand() + { + command.Execute(); + } + + public override void OnFailure(AerospikeException ae) + { + command.OnFailure(ae); + } + } + + public sealed class BatchTxnMonitor : AsyncTxnMonitor + { + private readonly AsyncBatchExecutor executor; + + public BatchTxnMonitor(AsyncBatchExecutor executor) + : base(null, null) + { + this.executor = executor; + } + + public override void RunCommand() + { + executor.Execute(); + } + + public override void OnFailure(AerospikeException ae) + { + executor.OnFailure(ae); + } + } + + readonly AsyncCommand command; + readonly AsyncCluster cluster; + + private AsyncTxnMonitor(AsyncCommand command, AsyncCluster cluster) + { + this.command = command; + this.cluster = cluster; + } + + void Execute(AsyncCluster cluster, Policy policy, Operation[] ops) + { + Key txnKey = TxnMonitor.GetTxnMonitorKey(policy.Txn); + WritePolicy wp = TxnMonitor.CopyTimeoutPolicy(policy); + + ExecuteRecordListener txnListener = new(this); + + // Add write key(s) to MRT monitor. 
+ OperateArgs args = new(wp, null, null, ops); + AsyncTxnAddKeys txnCommand = new(cluster, txnListener, txnKey, args); + txnCommand.Execute(); + } + + private void NotifyFailure(AerospikeException ae) + { + try + { + OnFailure(ae); + } + catch (Exception t) + { + Log.Error("notifyCommandFailure onFailure() failed: " + t.StackTrace); + } + } + + public abstract void OnFailure(AerospikeException ae); + public abstract void RunCommand(); + + private sealed class ExecuteRecordListener : RecordListener + { + private readonly AsyncTxnMonitor monitor; + + public ExecuteRecordListener(AsyncTxnMonitor monitor) + { + this.monitor = monitor; + } + + public void OnSuccess(Key key, Record record) + { + try + { + // Run original command. + monitor.RunCommand(); + } + catch (AerospikeException ae) + { + monitor.NotifyFailure(ae); + } + catch (Exception t) + { + monitor.NotifyFailure(new AerospikeException(t)); + } + } + + public void OnFailure(AerospikeException ae) + { + monitor.NotifyFailure(new AerospikeException(ResultCode.TXN_FAILED, "Failed to add key(s) to MRT monitor", ae)); + } + } + } +} + diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs index bf3793ce..276a8100 100644 --- a/AerospikeClient/Async/AsyncTxnRoll.cs +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -1,430 +1,481 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ -using static Aerospike.Client.AbortStatus; -using static Aerospike.Client.CommitError; -using static Aerospike.Client.CommitStatus; - -namespace Aerospike.Client -{ - public sealed class AsyncTxnRoll - { - private readonly AsyncCluster cluster; - private readonly BatchPolicy verifyPolicy; - private readonly BatchPolicy rollPolicy; - private readonly WritePolicy writePolicy; - private readonly Txn txn; - private readonly Key txnKey; - private CommitListener commitListener; - private AbortListener abortListener; - private BatchRecord[] verifyRecords; - private BatchRecord[] rollRecords; - private AerospikeException verifyException; - - public AsyncTxnRoll - ( - AsyncCluster cluster, - BatchPolicy verifyPolicy, - BatchPolicy rollPolicy, - Txn txn - ) - { - this.cluster = cluster; - this.verifyPolicy = verifyPolicy; - this.rollPolicy = rollPolicy; - this.writePolicy = new WritePolicy(rollPolicy); - this.txn = txn; - this.txnKey = TxnMonitor.GetTxnMonitorKey(txn); - } - - public void Commit(CommitListener listener) - { - commitListener = listener; - Verify(new VerifyListener(this)); - } - - public void Abort(AbortListener listener) - { - abortListener = listener; - - Roll(new RollListener(this), Command.INFO4_MRT_ROLL_BACK); - } - - private void Verify(BatchRecordArrayListener verifyListener) - { - // Validate record versions in a batch. 
- HashSet> reads = txn.Reads.ToHashSet>(); - int max = reads.Count; - if (max == 0) - { - return; - } - - BatchRecord[] records = new BatchRecord[max]; - Key[] keys = new Key[max]; - long[] versions = new long[max]; - int count = 0; - - foreach (KeyValuePair entry in reads) - { - Key key = entry.Key; - keys[count] = key; - records[count] = new BatchRecord(key, false); - versions[count] = entry.Value; - count++; - } - this.verifyRecords = records; - - new AsyncBatchTxnVerifyExecutor(cluster, verifyPolicy, verifyListener, keys, versions, records); - } - - private void MarkRollForward() - { - // Tell MRT monitor that a roll-forward will commence. - try - { - MarkRollForwardListener writeListener = new(this); - AsyncTxnMarkRollForward command = new(cluster, txn, writeListener, writePolicy, txnKey); - command.Execute(); - } - catch (Exception t) - { - NotifyCommitFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, t, false); - } - } - - private void RollForward() - { - try - { - RollForwardListener rollListener = new(this); - Roll(rollListener, Command.INFO4_MRT_ROLL_FORWARD); - } - catch (Exception t) - { - NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); - } - } - - private void RollBack() - { - try - { - RollForwardListener rollListener = new(this); - Roll(rollListener, Command.INFO4_MRT_ROLL_BACK); - } - catch (Exception t) - { - NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, t, false); - } - } - - private void Roll(BatchRecordArrayListener rollListener, int txnAttr) - { - HashSet keySet = txn.Writes; - - if (keySet.Count == 0) - { - return; - } - - Key[] keys = keySet.ToArray(); - BatchRecord[] records = new BatchRecord[keys.Length]; - - for (int i = 0; i < keys.Length; i++) - { - records[i] = new BatchRecord(keys[i], true); - } - - this.rollRecords = records; - - // Copy txn roll policy because it needs to be modified. 
- BatchPolicy batchPolicy = new(rollPolicy); - - BatchAttr attr = new(); - attr.SetTxn(txnAttr); - - new AsyncBatchTxnRollExecutor(cluster, verifyPolicy, rollListener, keys, records, attr); - } - - private void CloseOnCommit(bool verified) - { - if (!txn.MonitorMightExist()) - { - if (verified) - { - NotifyCommitSuccess(CommitStatusType.OK); - } - else - { - NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null, false); - } - } - } - - private void CloseOnAbort() - { - if (!txn.MonitorMightExist()) - { - // There is no MRT monitor record to remove. - NotifyAbortSuccess(AbortStatusType.OK); - return; - } - - try - { - CloseOnAbortListener deleteListener = new(this); - AsyncTxnClose command = new(cluster, txn, deleteListener, writePolicy, txnKey); - command.Execute(); - } - catch (Exception t) - { - NotifyAbortSuccess(AbortStatusType.CLOSE_ABANDONED); - } - } - - private void NotifyCommitSuccess(CommitStatusType status) - { - txn.Clear(); - - try - { - commitListener.OnSuccess(status); - } - catch (Exception t) - { - Log.Error("CommitListener onSuccess() failed: " + t.StackTrace); - } - } - - private void NotifyCommitFailure(CommitErrorType error, Exception cause, bool setInDoubt) - { - try - { - AerospikeException.Commit aec = (cause == null) ? 
- new AerospikeException.Commit(error, verifyRecords, rollRecords) : - new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); - - if (verifyException != null) - { - //aec.AddSuppressed(verifyException); TODO - } - - if (cause is AerospikeException) { - AerospikeException src = (AerospikeException)cause; - aec.Node = src.Node; - aec.Policy = src.Policy; - aec.Iteration = src.Iteration; - - if (setInDoubt) - { - aec.SetInDoubt(src.InDoubt); - } - } - - commitListener.OnFailure(aec); - } - catch (Exception t) - { - Log.Error("CommitListener onFailure() failed: " + t.StackTrace); - } - } - - private void NotifyAbortSuccess(AbortStatusType status) - { - txn.Clear(); - - try - { - abortListener.OnSuccess(status); - } - catch (Exception t) - { - Log.Error("AbortListener onSuccess() failed: " + t.StackTrace); - } - } - - private sealed class VerifyListener : BatchRecordArrayListener - { - private readonly AsyncTxnRoll command; - - public VerifyListener(AsyncTxnRoll command) - { - this.command = command; - } - - public void OnSuccess(BatchRecord[] records, bool status) - { - command.verifyRecords = records; - - if (status) - { - if (command.txn.MonitorExists()) - { - command.MarkRollForward(); - } - else - { - // There is nothing to roll-forward. 
- command.CloseOnCommit(true); - } - } - else - { - command.RollBack(); - } - } - - public void OnFailure(BatchRecord[] records, AerospikeException ae) - { - command.verifyRecords = records; - command.verifyException = ae; - command.RollBack(); - } - }; - - private sealed class RollListener : BatchRecordArrayListener - { - private readonly AsyncTxnRoll command; - - public RollListener(AsyncTxnRoll command) - { - this.command = command; - } - - public void OnSuccess(BatchRecord[] records, bool status) - { - command.rollRecords = records; - - if (status) - { - command.CloseOnAbort(); - } - else - { - command.NotifyAbortSuccess(AbortStatusType.ROLL_BACK_ABANDONED); - } - } - - public void OnFailure(BatchRecord[] records, AerospikeException ae) - { - command.rollRecords = records; - command.NotifyAbortSuccess(AbortStatusType.ROLL_BACK_ABANDONED); - } - }; - - private sealed class MarkRollForwardListener : WriteListener - { - private readonly AsyncTxnRoll command; - - public MarkRollForwardListener(AsyncTxnRoll command) - { - this.command = command; - } - - public void OnSuccess(Key key) - { - command.RollForward(); - } - - public void OnFailure(AerospikeException ae) - { - command.NotifyCommitFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, ae, true); - } - }; - - private sealed class RollForwardListener : BatchRecordArrayListener - { - private readonly AsyncTxnRoll command; - - public RollForwardListener(AsyncTxnRoll command) - { - this.command = command; - } - - public void OnSuccess(BatchRecord[] records, bool status) - { - command.rollRecords = records; - - if (status) - { - command.CloseOnCommit(true); - } - else - { - command.NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); - } - } - - public void OnFailure(BatchRecord[] records, AerospikeException ae) - { - command.rollRecords = records; - command.NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); - } - }; - - private sealed class RollBackListener : BatchRecordArrayListener - { - 
private readonly AsyncTxnRoll command; - - public RollBackListener(AsyncTxnRoll command) - { - this.command = command; - } - - public void OnSuccess(BatchRecord[] records, bool status) - { - command.rollRecords = records; - - if (status) - { - command.CloseOnCommit(false); - } - else - { - command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, null, false); - } - } - - public void OnFailure(BatchRecord[] records, AerospikeException ae) - { - command.rollRecords = records; - command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, ae, false); - } - }; - - private sealed class CloseOnAbortListener : DeleteListener - { - private readonly AsyncTxnRoll command; - - public CloseOnAbortListener(AsyncTxnRoll command) - { - this.command = command; - } - - public void OnSuccess(Key key, bool existed) - { - command.NotifyAbortSuccess(AbortStatusType.OK); - } - - public void OnFailure(AerospikeException ae) - { - command.NotifyAbortSuccess(AbortStatusType.CLOSE_ABANDONED); - } - }; - } -} - +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using static Aerospike.Client.AbortStatus; +using static Aerospike.Client.CommitError; +using static Aerospike.Client.CommitStatus; + +namespace Aerospike.Client +{ + public sealed class AsyncTxnRoll + { + private readonly AsyncCluster cluster; + private readonly BatchPolicy verifyPolicy; + private readonly BatchPolicy rollPolicy; + private readonly WritePolicy writePolicy; + private readonly Txn txn; + private readonly Key txnKey; + private CommitListener commitListener; + private AbortListener abortListener; + private BatchRecord[] verifyRecords; + private BatchRecord[] rollRecords; + private AerospikeException verifyException; + + public AsyncTxnRoll + ( + AsyncCluster cluster, + BatchPolicy verifyPolicy, + BatchPolicy rollPolicy, + Txn txn + ) + { + this.cluster = cluster; + this.verifyPolicy = verifyPolicy; + this.rollPolicy = rollPolicy; + this.writePolicy = new WritePolicy(rollPolicy); + this.txn = txn; + this.txnKey = TxnMonitor.GetTxnMonitorKey(txn); + } + + public void Commit(CommitListener listener) + { + commitListener = listener; + Verify(new VerifyListener(this)); + } + + public void Abort(AbortListener listener) + { + abortListener = listener; + + Roll(new RollListener(this), Command.INFO4_MRT_ROLL_BACK); + } + + private void Verify(BatchRecordArrayListener verifyListener) + { + // Validate record versions in a batch. 
+ HashSet> reads = txn.Reads.ToHashSet>(); + int max = reads.Count; + if (max == 0) + { + verifyListener.OnSuccess(new BatchRecord[0], true); + return; + } + + BatchRecord[] records = new BatchRecord[max]; + Key[] keys = new Key[max]; + long?[] versions = new long?[max]; + int count = 0; + + foreach (KeyValuePair entry in reads) + { + Key key = entry.Key; + keys[count] = key; + records[count] = new BatchRecord(key, false); + versions[count] = entry.Value; + count++; + } + this.verifyRecords = records; + + AsyncBatchTxnVerifyExecutor executor = new(cluster, verifyPolicy, verifyListener, keys, versions, records); + executor.Execute(); + } + + private void MarkRollForward() + { + // Tell MRT monitor that a roll-forward will commence. + try + { + MarkRollForwardListener writeListener = new(this); + AsyncTxnMarkRollForward command = new(cluster, writeListener, writePolicy, txnKey); + command.Execute(); + } + catch (Exception t) + { + NotifyCommitFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, t, false); + } + } + + private void RollForward() + { + try + { + RollForwardListener rollListener = new(this); + Roll(rollListener, Command.INFO4_MRT_ROLL_FORWARD); + } + catch (Exception e) + { + NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); + } + } + + private void RollBack() + { + try + { + RollForwardListener rollListener = new(this); + Roll(rollListener, Command.INFO4_MRT_ROLL_BACK); + } + catch (Exception t) + { + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, t, false); + } + } + + private void Roll(BatchRecordArrayListener rollListener, int txnAttr) + { + HashSet keySet = txn.Writes; + + if (keySet.Count == 0) + { + rollListener.OnSuccess(new BatchRecord[0], true); + return; + } + + Key[] keys = keySet.ToArray(); + BatchRecord[] records = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + records[i] = new BatchRecord(keys[i], true); + } + + BatchAttr attr = new(); + attr.SetTxn(txnAttr); + + 
AsyncBatchTxnRollExecutor executor = new(cluster, rollPolicy, rollListener, txn, keys, records, attr); + executor.Execute(); + } + + private void CloseOnCommit(bool verified) + { + if (!txn.MonitorMightExist()) + { + if (verified) + { + NotifyCommitSuccess(CommitStatusType.OK); + } + else + { + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null, false); + } + return; + } + + try + { + AsyncTxnClose command = new(cluster, txn, new CloseOnCommitListener(this, verified), writePolicy, txnKey); + command.Execute(); + } + catch (Exception e) + { + if (verified) { + NotifyCommitSuccess(CommitStatusType.CLOSE_ABANDONED); + } + else { + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e, false); + } + } + } + + private void CloseOnAbort() + { + if (!txn.MonitorMightExist()) + { + // There is no MRT monitor record to remove. + NotifyAbortSuccess(AbortStatusType.OK); + return; + } + + try + { + CloseOnAbortListener deleteListener = new(this); + AsyncTxnClose command = new(cluster, txn, deleteListener, writePolicy, txnKey); + command.Execute(); + } + catch (Exception e) + { + NotifyAbortSuccess(AbortStatusType.CLOSE_ABANDONED); + } + } + + private void NotifyCommitSuccess(CommitStatusType status) + { + txn.Clear(); + + try + { + commitListener.OnSuccess(status); + } + catch (Exception t) + { + Log.Error("CommitListener onSuccess() failed: " + t.StackTrace); + } + } + + private void NotifyCommitFailure(CommitErrorType error, Exception cause, bool setInDoubt) + { + try + { + AerospikeException.Commit aec = (cause == null) ? 
+ new AerospikeException.Commit(error, verifyRecords, rollRecords) : + new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); + + if (verifyException != null) + { + //aec.AddSuppressed(verifyException); TODO + } + + if (cause is AerospikeException) { + AerospikeException src = (AerospikeException)cause; + aec.Node = src.Node; + aec.Policy = src.Policy; + aec.Iteration = src.Iteration; + + if (setInDoubt) + { + aec.SetInDoubt(src.InDoubt); + } + } + + commitListener.OnFailure(aec); + } + catch (Exception t) + { + Log.Error("CommitListener onFailure() failed: " + t.StackTrace); + } + } + + private void NotifyAbortSuccess(AbortStatusType status) + { + txn.Clear(); + + try + { + abortListener.OnSuccess(status); + } + catch (Exception t) + { + Log.Error("AbortListener onSuccess() failed: " + t.StackTrace); + } + } + + private sealed class VerifyListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public VerifyListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.verifyRecords = records; + + if (status) + { + if (command.txn.MonitorExists()) + { + command.MarkRollForward(); + } + else + { + // There is nothing to roll-forward. 
+ command.CloseOnCommit(true); + } + } + else + { + command.RollBack(); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.verifyRecords = records; + command.verifyException = ae; + command.RollBack(); + } + }; + + private sealed class RollListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public RollListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.rollRecords = records; + + if (status) + { + command.CloseOnAbort(); + } + else + { + command.NotifyAbortSuccess(AbortStatusType.ROLL_BACK_ABANDONED); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.rollRecords = records; + command.NotifyAbortSuccess(AbortStatusType.ROLL_BACK_ABANDONED); + } + }; + + private sealed class MarkRollForwardListener : WriteListener + { + private readonly AsyncTxnRoll command; + + public MarkRollForwardListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(Key key) + { + command.RollForward(); + } + + public void OnFailure(AerospikeException ae) + { + command.NotifyCommitFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, ae, true); + } + }; + + private sealed class RollForwardListener : BatchRecordArrayListener + { + private readonly AsyncTxnRoll command; + + public RollForwardListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.rollRecords = records; + + if (status) + { + command.CloseOnCommit(true); + } + else + { + command.NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.rollRecords = records; + command.NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); + } + }; + + private sealed class RollBackListener : BatchRecordArrayListener + { + 
private readonly AsyncTxnRoll command; + + public RollBackListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + command.rollRecords = records; + + if (status) + { + command.CloseOnCommit(false); + } + else + { + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, null, false); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException ae) + { + command.rollRecords = records; + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, ae, false); + } + }; + + + private sealed class CloseOnCommitListener : DeleteListener + { + private readonly AsyncTxnRoll command; + private readonly bool verified; + + public CloseOnCommitListener(AsyncTxnRoll command, bool verified) + { + this.command = command; + this.verified = verified; + } + + public void OnSuccess(Key key, bool existed) + { + if (verified) + { + command.NotifyCommitSuccess(CommitStatusType.OK); + } + else + { + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null, false); + } + } + + public void OnFailure(AerospikeException ae) + { + if (verified) + { + command.NotifyCommitSuccess(CommitStatusType.CLOSE_ABANDONED); + } + else + { + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, ae, false); + } + } + }; + private sealed class CloseOnAbortListener : DeleteListener + { + private readonly AsyncTxnRoll command; + + public CloseOnAbortListener(AsyncTxnRoll command) + { + this.command = command; + } + + public void OnSuccess(Key key, bool existed) + { + command.NotifyAbortSuccess(AbortStatusType.OK); + } + + public void OnFailure(AerospikeException ae) + { + command.NotifyAbortSuccess(AbortStatusType.CLOSE_ABANDONED); + } + }; + } +} + diff --git a/AerospikeClient/Async/AsyncWrite.cs b/AerospikeClient/Async/AsyncWrite.cs index b3d8d972..0b2f03d0 100644 --- a/AerospikeClient/Async/AsyncWrite.cs +++ b/AerospikeClient/Async/AsyncWrite.cs @@ -1,97 +1,98 @@ 
-/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -namespace Aerospike.Client -{ - public sealed class AsyncWrite : AsyncWriteBase - { - private readonly WriteListener listener; - private readonly Bin[] bins; - private readonly Operation.Type operation; - - public AsyncWrite - ( - AsyncCluster cluster, - WritePolicy writePolicy, - WriteListener listener, - Key key, - Bin[] bins, - Operation.Type operation - ) : base(cluster, writePolicy, key) - { - this.listener = listener; - this.bins = bins; - this.operation = operation; - } - - public AsyncWrite(AsyncWrite other) - : base(other) - { - this.listener = other.listener; - this.bins = other.bins; - this.operation = other.operation; - } - - protected internal override AsyncCommand CloneCommand() - { - return new AsyncWrite(this); - } - - protected internal override void WriteBuffer() - { - SetWrite(writePolicy, operation, Key, bins); - } - - protected internal override bool ParseResult() - { - ParseHeader(); - - if (resultCode == ResultCode.OK) - { - return true; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (policy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - return true; - } - - throw new AerospikeException(resultCode); - } - - protected internal override void OnSuccess() - { - if (listener != null) - { - 
listener.OnSuccess(Key); - } - } - - protected internal override void OnFailure(AerospikeException e) - { - if (listener != null) - { - listener.OnFailure(e); - } - } - } -} - +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +namespace Aerospike.Client +{ + public sealed class AsyncWrite : AsyncWriteBase + { + private readonly WriteListener listener; + private readonly Bin[] bins; + private readonly Operation.Type operation; + + public AsyncWrite + ( + AsyncCluster cluster, + WritePolicy writePolicy, + WriteListener listener, + Key key, + Bin[] bins, + Operation.Type operation + ) : base(cluster, writePolicy, key) + { + this.listener = listener; + this.bins = bins; + this.operation = operation; + } + + public AsyncWrite(AsyncWrite other) + : base(other) + { + this.listener = other.listener; + this.bins = other.bins; + this.operation = other.operation; + } + + protected internal override AsyncCommand CloneCommand() + { + return new AsyncWrite(this); + } + + protected internal override void WriteBuffer() + { + SetWrite(writePolicy, operation, Key, bins); + } + + protected internal override bool ParseResult() + { + ParseHeader(); + ParseFields(policy.Txn, Key, true); + + if (resultCode == ResultCode.OK) + { + return true; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (policy.failOnFilteredOut) + { + 
throw new AerospikeException(resultCode); + } + return true; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnSuccess() + { + if (listener != null) + { + listener.OnSuccess(Key); + } + } + + protected internal override void OnFailure(AerospikeException e) + { + if (listener != null) + { + listener.OnFailure(e); + } + } + } +} + diff --git a/AerospikeClient/Command/Batch.cs b/AerospikeClient/Command/Batch.cs index f034b103..16a1f68c 100644 --- a/AerospikeClient/Command/Batch.cs +++ b/AerospikeClient/Command/Batch.cs @@ -1,842 +1,842 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using System.Collections; - -namespace Aerospike.Client -{ - //------------------------------------------------------- - // ReadList - //------------------------------------------------------- - - public sealed class BatchReadListCommand : BatchCommand - { - private readonly List records; - - public BatchReadListCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy policy, - List records, - BatchStatus status - ) : base(cluster, batch, policy, status, true) - { - this.records = records; - } - - protected internal override void WriteBuffer() - { - if (batch.node != null && batch.node.HasBatchAny) - { - SetBatchOperate(batchPolicy, records, batch); - } - else - { - SetBatchRead(batchPolicy, records, batch); - } - } - - protected internal override bool ParseRow() - { - BatchRead record = records[batchIndex]; - - ParseFieldsRead(record.key); - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else - { - record.SetError(resultCode, false); - status.SetRowError(); - } - return true; - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchReadListCommand(cluster, batchNode, batchPolicy, records, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, status); - } - } - - //------------------------------------------------------- - // GetArray - //------------------------------------------------------- - - public sealed class BatchGetArrayCommand : BatchCommand - { - private readonly Key[] keys; - private readonly string[] binNames; - private readonly Operation[] ops; - private readonly Record[] records; - private readonly int readAttr; - - public BatchGetArrayCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy policy, - Key[] keys, - string[] binNames, - Operation[] ops, - Record[] records, - int readAttr, - bool isOperation, - BatchStatus status - ) : base(cluster, 
batch, policy, status, isOperation) - { - this.keys = keys; - this.binNames = binNames; - this.ops = ops; - this.records = records; - this.readAttr = readAttr; - } - - protected internal override void WriteBuffer() - { - if (batch.node != null && batch.node.HasBatchAny) - { - BatchAttr attr = new(policy, readAttr, ops); - SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); - } - } - - protected internal override bool ParseRow() - { - ParseFieldsRead(keys[batchIndex]); - - if (resultCode == 0) - { - records[batchIndex] = ParseRecord(); - } - return true; - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchGetArrayCommand(cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status); - } - } - - //------------------------------------------------------- - // ExistsArray - //------------------------------------------------------- - - public sealed class BatchExistsArrayCommand : BatchCommand - { - private readonly Key[] keys; - private readonly bool[] existsArray; - - public BatchExistsArrayCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy policy, - Key[] keys, - bool[] existsArray, - BatchStatus status - ) : base(cluster, batch, policy, status, false) - { - this.keys = keys; - this.existsArray = existsArray; - } - - protected internal override void WriteBuffer() - { - if (batch.node != null && batch.node.HasBatchAny) - { - BatchAttr attr = new(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA); - SetBatchOperate(batchPolicy, keys, batch, null, null, attr); - } - else - { - SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); - } - } - - protected internal 
override bool ParseRow() - { - ParseFieldsRead(keys[batchIndex]); - existsArray[batchIndex] = resultCode == 0; - return true; - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchExistsArrayCommand(cluster, batchNode, batchPolicy, keys, existsArray, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status); - } - } - - //------------------------------------------------------- - // OperateList - //------------------------------------------------------- - - public sealed class BatchOperateListCommand : BatchCommand - { - private readonly IList records; - - public BatchOperateListCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy policy, - IList records, - BatchStatus status - ) : base(cluster, batch, policy, status, true) - { - this.records = records; - } - - protected internal override bool IsWrite() - { - // This method is only called to set inDoubt on node level errors. - // SetError() will filter out reads when setting record level inDoubt. - return true; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, (IList)records, batch); - } - - protected internal override bool ParseRow() - { - BatchRecord record = records[batchIndex]; - - ParseFields(record); - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - return true; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. 
- record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); - status.SetRowError(); - return true; - } - } - - record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); - status.SetRowError(); - return true; - } - - protected internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = record.hasWrite; - - if (record.inDoubt && policy.Txn != null) { - policy.Txn.OnWriteInDoubt(record.key); - } - } - } - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchOperateListCommand(cluster, batchNode, batchPolicy, records, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, (IList)records, sequenceAP, sequenceSC, batch, status); - } - } - - //------------------------------------------------------- - // OperateArray - //------------------------------------------------------- - - public sealed class BatchOperateArrayCommand : BatchCommand - { - private readonly Key[] keys; - private readonly Operation[] ops; - private readonly BatchRecord[] records; - private readonly BatchAttr attr; - - public BatchOperateArrayCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - Operation[] ops, - BatchRecord[] records, - BatchAttr attr, - BatchStatus status - ) : base(cluster, batch, batchPolicy, status, ops != null) - { - this.keys = keys; - this.ops = ops; - this.records = records; - this.attr = attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchOperate(batchPolicy, keys, batch, null, ops, attr); - } - - protected internal override bool 
ParseRow() - { - BatchRecord record = records[batchIndex]; - - ParseFields(record); - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - } - else - { - record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - status.SetRowError(); - } - return true; - } - - protected internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt || !attr.hasWrite) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = inDoubt; - - if (record.inDoubt && policy.Txn != null) { - policy.Txn.OnWriteInDoubt(record.key); - } - } - } - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); - } - } - - //------------------------------------------------------- - // UDF - //------------------------------------------------------- - - public sealed class BatchUDFCommand : BatchCommand - { - private readonly Key[] keys; - private readonly string packageName; - private readonly string functionName; - private readonly byte[] argBytes; - private readonly BatchRecord[] records; - private readonly BatchAttr attr; - - public BatchUDFCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - string packageName, - string functionName, - byte[] argBytes, - BatchRecord[] records, - BatchAttr attr, - BatchStatus status - ) : base(cluster, batch, batchPolicy, status, false) - { - this.keys = keys; - this.packageName = packageName; - this.functionName = functionName; - this.argBytes = argBytes; - this.records = records; - this.attr = attr; - } - - protected internal override bool 
IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); - } - - protected internal override bool ParseRow() - { - BatchRecord record = records[batchIndex]; - - ParseFields(record); - - if (resultCode == 0) - { - record.SetRecord(ParseRecord()); - return true; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) - { - Record r = ParseRecord(); - string m = r.GetString("FAILURE"); - - if (m != null) - { - // Need to store record because failure bin contains an error message. - record.record = r; - record.resultCode = resultCode; - record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter); - status.SetRowError(); - return true; - } - } - - record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - status.SetRowError(); - return true; - } - - protected internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt || !attr.hasWrite) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = inDoubt; - - if (record.inDoubt && policy.Txn != null) { - policy.Txn.OnWriteInDoubt(record.key); - } - } - } - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); - } - } - - //------------------------------------------------------- - // MRT - //------------------------------------------------------- - - public sealed class BatchTxnVerify : BatchCommand - { - private readonly Txn txn; - private readonly Key[] keys; - private readonly long[] 
versions; - private readonly BatchRecord[] records; - - public BatchTxnVerify( - Cluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Txn txn, - Key[] keys, - long[] versions, - BatchRecord[] records, - BatchStatus status - ) : base(cluster, batch, batchPolicy, status, false) - { - this.txn = txn; - this.keys = keys; - this.versions = versions; - this.records = records; - } - - protected internal override bool IsWrite() - { - return false; - } - - protected internal override void WriteBuffer() - { - SetBatchTxnVerify(batchPolicy, txn, keys, versions, batch); - } - - protected internal override bool ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = records[batchIndex]; - - if (resultCode == 0) - { - record.resultCode = resultCode; - } - else - { - record.SetError(resultCode, false); - status.SetRowError(); - } - return true; - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchTxnVerify(cluster, batchNode, batchPolicy, txn, keys, versions, records, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, false, status); - } - } - - public sealed class BatchTxnRoll : BatchCommand - { - private readonly Key[] keys; - private readonly BatchRecord[] records; - private readonly BatchAttr attr; - - public BatchTxnRoll( - Cluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - Key[] keys, - BatchRecord[] records, - BatchAttr attr, - BatchStatus status - ) : base(cluster, batch, batchPolicy, status, false) - { - this.keys = keys; - this.records = records; - this.attr = attr; - } - - protected internal override bool IsWrite() - { - return attr.hasWrite; - } - - protected internal override void WriteBuffer() - { - SetBatchTxnRoll(batchPolicy, keys, batch, attr); - } - - protected internal override bool ParseRow() - { - SkipKey(fieldCount); - - BatchRecord record = 
records[batchIndex]; - - if (resultCode == 0) - { - record.resultCode = resultCode; - } - else - { - record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); - status.SetRowError(); - } - return true; - } - - protected internal override void SetInDoubt(bool inDoubt) - { - if (!inDoubt || !attr.hasWrite) - { - return; - } - - foreach (int index in batch.offsets) - { - BatchRecord record = records[index]; - - if (record.resultCode == ResultCode.NO_RESPONSE) - { - record.inDoubt = true; - } - } - } - - protected internal override BatchCommand CreateCommand(BatchNode batchNode) - { - return new BatchTxnRoll(cluster, batchNode, batchPolicy, keys, records, attr, status); - } - - protected internal override List GenerateBatchNodes() - { - return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); - } - } - - //------------------------------------------------------- - // Batch Base Command - //------------------------------------------------------- - - public abstract class BatchCommand : MultiCommand - { - internal readonly BatchNode batch; - internal readonly BatchPolicy batchPolicy; - internal readonly BatchStatus status; - internal BatchExecutor parent; - internal uint sequenceAP; - internal uint sequenceSC; - internal bool splitRetry; - - public BatchCommand - ( - Cluster cluster, - BatchNode batch, - BatchPolicy batchPolicy, - BatchStatus status, - bool isOperation - ) : base(cluster, batchPolicy, batch.node, isOperation) - { - this.batch = batch; - this.batchPolicy = batchPolicy; - this.status = status; - } - - public void Run(object obj) - { - try - { - Execute(); - } - catch (AerospikeException ae) - { - // Set error/inDoubt for keys associated this batch command when - // the command was not retried and split. If a split retry occurred, - // those new subcommands have already set error/inDoubt on the affected - // subset of keys. 
- if (!splitRetry) - { - SetInDoubt(ae.InDoubt); - } - status.SetException(ae); - } - catch (Exception e) - { - if (!splitRetry) - { - SetInDoubt(true); - } - status.SetException(e); - } - finally - { - parent.OnComplete(); - } - } - - protected void ParseFieldsRead(Key key) - { - if (policy.Txn != null) - { - long? version = ParseVersion(fieldCount); - policy.Txn.OnRead(key, version); - } - else - { - SkipKey(fieldCount); - } - } - - protected void ParseFields(BatchRecord br) - { - if (policy.Txn != null) - { - long? version = ParseVersion(fieldCount); - - if (br.hasWrite) - { - policy.Txn.OnWrite(br.key, version, resultCode); - } - else - { - policy.Txn.OnRead(br.key, version); - } - } - else - { - SkipKey(fieldCount); - } - } - - protected override Latency.LatencyType GetLatencyType() - { - return Latency.LatencyType.BATCH; - } - - protected internal override bool PrepareRetry(bool timeout) - { - if (!((batchPolicy.replica == Replica.SEQUENCE || batchPolicy.replica == Replica.PREFER_RACK) && - (parent == null || !parent.IsDone()))) - { - // Perform regular retry to same node. - return true; - } - sequenceAP++; - - if (!timeout || batchPolicy.readModeSC != ReadModeSC.LINEARIZE) - { - sequenceSC++; - } - return false; - } - - protected internal override bool RetryBatch - ( - Cluster cluster, - int socketTimeout, - int totalTimeout, - DateTime deadline, - int iteration, - int commandSentCounter - ) - { - // Retry requires keys for this node to be split among other nodes. - // This is both recursive and exponential. - List batchNodes = GenerateBatchNodes(); - - if (batchNodes.Count == 1 && batchNodes[0].node == batch.node) - { - // Batch node is the same. Go through normal retry. - return false; - } - - splitRetry = true; - - // Run batch requests sequentially in same thread. 
- foreach (BatchNode batchNode in batchNodes) - { - BatchCommand command = CreateCommand(batchNode); - command.parent = parent; - command.sequenceAP = sequenceAP; - command.sequenceSC = sequenceSC; - command.socketTimeout = socketTimeout; - command.totalTimeout = totalTimeout; - command.iteration = iteration; - command.commandSentCounter = commandSentCounter; - command.deadline = deadline; - - try - { - cluster.AddRetry(); - command.ExecuteCommand(); - } - catch (AerospikeException ae) - { - if (!command.splitRetry) - { - command.SetInDoubt(ae.InDoubt); - } - status.SetException(ae); - - if (!batchPolicy.respondAllKeys) - { - throw; - } - } - catch (Exception e) - { - if (!command.splitRetry) - { - command.SetInDoubt(true); - } - status.SetException(e); - - if (!batchPolicy.respondAllKeys) - { - throw; - } - } - } - return true; - } - - protected internal virtual void SetInDoubt(bool inDoubt) - { - // Do nothing by default. Batch writes will override this method. - } - - protected internal abstract BatchCommand CreateCommand(BatchNode batchNode); - protected internal abstract List GenerateBatchNodes(); - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using System.Collections; + +namespace Aerospike.Client +{ + //------------------------------------------------------- + // ReadList + //------------------------------------------------------- + + public sealed class BatchReadListCommand : BatchCommand + { + private readonly List records; + + public BatchReadListCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy policy, + List records, + BatchStatus status + ) : base(cluster, batch, policy, status, true) + { + this.records = records; + } + + protected internal override void WriteBuffer() + { + if (batch.node != null && batch.node.HasBatchAny) + { + SetBatchOperate(batchPolicy, records, batch); + } + else + { + SetBatchRead(batchPolicy, records, batch); + } + } + + protected internal override bool ParseRow() + { + BatchRead record = records[batchIndex]; + + ParseFieldsRead(record.key); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, false); + status.SetRowError(); + } + return true; + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchReadListCommand(cluster, batchNode, batchPolicy, records, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, records, sequenceAP, sequenceSC, batch, status); + } + } + + //------------------------------------------------------- + // GetArray + //------------------------------------------------------- + + public sealed class BatchGetArrayCommand : BatchCommand + { + private readonly Key[] keys; + private readonly string[] binNames; + private readonly Operation[] ops; + private readonly Record[] records; + private readonly int readAttr; + + public BatchGetArrayCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy policy, + Key[] keys, + string[] binNames, + Operation[] ops, + Record[] records, + int readAttr, + bool isOperation, + BatchStatus status + ) : base(cluster, 
batch, policy, status, isOperation) + { + this.keys = keys; + this.binNames = binNames; + this.ops = ops; + this.records = records; + this.readAttr = readAttr; + } + + protected internal override void WriteBuffer() + { + if (batch.node != null && batch.node.HasBatchAny) + { + BatchAttr attr = new(policy, readAttr, ops); + SetBatchOperate(batchPolicy, keys, batch, binNames, ops, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, binNames, ops, readAttr); + } + } + + protected internal override bool ParseRow() + { + ParseFieldsRead(keys[batchIndex]); + + if (resultCode == 0) + { + records[batchIndex] = ParseRecord(); + } + return true; + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchGetArrayCommand(cluster, batchNode, batchPolicy, keys, binNames, ops, records, readAttr, isOperation, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status); + } + } + + //------------------------------------------------------- + // ExistsArray + //------------------------------------------------------- + + public sealed class BatchExistsArrayCommand : BatchCommand + { + private readonly Key[] keys; + private readonly bool[] existsArray; + + public BatchExistsArrayCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy policy, + Key[] keys, + bool[] existsArray, + BatchStatus status + ) : base(cluster, batch, policy, status, false) + { + this.keys = keys; + this.existsArray = existsArray; + } + + protected internal override void WriteBuffer() + { + if (batch.node != null && batch.node.HasBatchAny) + { + BatchAttr attr = new(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA); + SetBatchOperate(batchPolicy, keys, batch, null, null, attr); + } + else + { + SetBatchRead(batchPolicy, keys, batch, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA); + } + } + + protected internal 
override bool ParseRow() + { + ParseFieldsRead(keys[batchIndex]); + existsArray[batchIndex] = resultCode == 0; + return true; + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchExistsArrayCommand(cluster, batchNode, batchPolicy, keys, existsArray, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, status); + } + } + + //------------------------------------------------------- + // OperateList + //------------------------------------------------------- + + public sealed class BatchOperateListCommand : BatchCommand + { + private readonly IList records; + + public BatchOperateListCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy policy, + IList records, + BatchStatus status + ) : base(cluster, batch, policy, status, true) + { + this.records = records; + } + + protected internal override bool IsWrite() + { + // This method is only called to set inDoubt on node level errors. + // SetError() will filter out reads when setting record level inDoubt. + return true; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, (IList)records, batch); + } + + protected internal override bool ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + return true; + } + + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. 
+ record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(record.hasWrite, commandSentCounter); + status.SetRowError(); + return true; + } + } + + record.SetError(resultCode, Command.BatchInDoubt(record.hasWrite, commandSentCounter)); + status.SetRowError(); + return true; + } + + protected internal override void InDoubt() + { + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.Txn != null) { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchOperateListCommand(cluster, batchNode, batchPolicy, records, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, (IList)records, sequenceAP, sequenceSC, batch, status); + } + } + + //------------------------------------------------------- + // OperateArray + //------------------------------------------------------- + + public sealed class BatchOperateArrayCommand : BatchCommand + { + private readonly Key[] keys; + private readonly Operation[] ops; + private readonly BatchRecord[] records; + private readonly BatchAttr attr; + + public BatchOperateArrayCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + Operation[] ops, + BatchRecord[] records, + BatchAttr attr, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, ops != null) + { + this.keys = keys; + this.ops = ops; + this.records = records; + this.attr = attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchOperate(batchPolicy, keys, batch, null, ops, attr); + } + + protected internal override bool ParseRow() + { + BatchRecord record = 
records[batchIndex]; + + ParseFields(record); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + } + else + { + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + status.SetRowError(); + } + return true; + } + + protected internal override void InDoubt() + { + if (!attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + + if (policy.Txn != null) { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); + } + } + + //------------------------------------------------------- + // UDF + //------------------------------------------------------- + + public sealed class BatchUDFCommand : BatchCommand + { + private readonly Key[] keys; + private readonly string packageName; + private readonly string functionName; + private readonly byte[] argBytes; + private readonly BatchRecord[] records; + private readonly BatchAttr attr; + + public BatchUDFCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchRecord[] records, + BatchAttr attr, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, false) + { + this.keys = keys; + this.packageName = packageName; + this.functionName = functionName; + this.argBytes = argBytes; + this.records = records; + this.attr = attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() 
+ { + SetBatchUDF(batchPolicy, keys, batch, packageName, functionName, argBytes, attr); + } + + protected internal override bool ParseRow() + { + BatchRecord record = records[batchIndex]; + + ParseFields(record); + + if (resultCode == 0) + { + record.SetRecord(ParseRecord()); + return true; + } + + if (resultCode == ResultCode.UDF_BAD_RESPONSE) + { + Record r = ParseRecord(); + string m = r.GetString("FAILURE"); + + if (m != null) + { + // Need to store record because failure bin contains an error message. + record.record = r; + record.resultCode = resultCode; + record.inDoubt = Command.BatchInDoubt(attr.hasWrite, commandSentCounter); + status.SetRowError(); + return true; + } + } + + record.SetError(resultCode, Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + status.SetRowError(); + return true; + } + + protected internal override void InDoubt() + { + if (!attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + + if (policy.Txn != null) { + policy.Txn.OnWriteInDoubt(record.key); + } + } + } + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); + } + } + + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public sealed class BatchTxnVerify : BatchCommand + { + private readonly Key[] keys; + private readonly long?[] versions; + private readonly BatchRecord[] records; + + public BatchTxnVerify( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + long?[] 
versions, + BatchRecord[] records, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, false) + { + this.keys = keys; + this.versions = versions; + this.records = records; + } + + protected internal override bool IsWrite() + { + return false; + } + + protected internal override void WriteBuffer() + { + SetBatchTxnVerify(batchPolicy, keys, versions, batch); + } + + protected internal override bool ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == 0) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, false); + status.SetRowError(); + } + return true; + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchTxnVerify(cluster, batchNode, batchPolicy, keys, versions, records, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, false, status); + } + } + + public sealed class BatchTxnRoll : BatchCommand + { + private readonly Txn txn; + private readonly Key[] keys; + private readonly BatchRecord[] records; + private readonly BatchAttr attr; + + public BatchTxnRoll( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Txn txn, + Key[] keys, + BatchRecord[] records, + BatchAttr attr, + BatchStatus status + ) : base(cluster, batch, batchPolicy, status, false) + { + this.txn = txn; + this.keys = keys; + this.records = records; + this.attr = attr; + } + + protected internal override bool IsWrite() + { + return attr.hasWrite; + } + + protected internal override void WriteBuffer() + { + SetBatchTxnRoll(batchPolicy, txn, keys, batch, attr); + } + + protected internal override bool ParseRow() + { + SkipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == 0) + { + record.resultCode = resultCode; + } + else + { + record.SetError(resultCode, 
Command.BatchInDoubt(attr.hasWrite, commandSentCounter)); + status.SetRowError(); + } + return true; + } + + protected internal override void InDoubt() + { + if (!attr.hasWrite) + { + return; + } + + foreach (int index in batch.offsets) + { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) + { + record.inDoubt = true; + } + } + } + + protected internal override BatchCommand CreateCommand(BatchNode batchNode) + { + return new BatchTxnRoll(cluster, batchNode, batchPolicy, txn, keys, records, attr, status); + } + + protected internal override List GenerateBatchNodes() + { + return BatchNode.GenerateList(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); + } + } + + //------------------------------------------------------- + // Batch Base Command + //------------------------------------------------------- + + public abstract class BatchCommand : MultiCommand + { + internal readonly BatchNode batch; + internal readonly BatchPolicy batchPolicy; + internal readonly BatchStatus status; + internal BatchExecutor parent; + internal uint sequenceAP; + internal uint sequenceSC; + internal bool splitRetry; + + public BatchCommand + ( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + BatchStatus status, + bool isOperation + ) : base(cluster, batchPolicy, batch.node, isOperation) + { + this.batch = batch; + this.batchPolicy = batchPolicy; + this.status = status; + } + + public void Run(object obj) + { + try + { + Execute(); + } + catch (AerospikeException ae) + { + if (ae.InDoubt) + { + SetInDoubt(); + } + status.SetException(ae); + } + catch (Exception e) + { + SetInDoubt(); + status.SetException(e); + } + finally + { + parent.OnComplete(); + } + } + + protected void ParseFieldsRead(Key key) + { + if (policy.Txn != null) + { + long? 
version = ParseVersion(fieldCount); + policy.Txn.OnRead(key, version); + } + else + { + SkipKey(fieldCount); + } + } + + protected void ParseFields(BatchRecord br) + { + if (policy.Txn != null) + { + long? version = ParseVersion(fieldCount); + + if (br.hasWrite) + { + policy.Txn.OnWrite(br.key, version, resultCode); + } + else + { + policy.Txn.OnRead(br.key, version); + } + } + else + { + SkipKey(fieldCount); + } + } + + protected override Latency.LatencyType GetLatencyType() + { + return Latency.LatencyType.BATCH; + } + + protected internal override bool PrepareRetry(bool timeout) + { + if (!((batchPolicy.replica == Replica.SEQUENCE || batchPolicy.replica == Replica.PREFER_RACK) && + (parent == null || !parent.IsDone()))) + { + // Perform regular retry to same node. + return true; + } + sequenceAP++; + + if (!timeout || batchPolicy.readModeSC != ReadModeSC.LINEARIZE) + { + sequenceSC++; + } + return false; + } + + protected internal override bool RetryBatch + ( + Cluster cluster, + int socketTimeout, + int totalTimeout, + DateTime deadline, + int iteration, + int commandSentCounter + ) + { + // Retry requires keys for this node to be split among other nodes. + // This is both recursive and exponential. + List batchNodes = GenerateBatchNodes(); + + if (batchNodes.Count == 1 && batchNodes[0].node == batch.node) + { + // Batch node is the same. Go through normal retry. + return false; + } + + splitRetry = true; + + // Run batch requests sequentially in same thread. 
+ foreach (BatchNode batchNode in batchNodes) + { + BatchCommand command = CreateCommand(batchNode); + command.parent = parent; + command.sequenceAP = sequenceAP; + command.sequenceSC = sequenceSC; + command.socketTimeout = socketTimeout; + command.totalTimeout = totalTimeout; + command.iteration = iteration; + command.commandSentCounter = commandSentCounter; + command.deadline = deadline; + + try + { + cluster.AddRetry(); + command.ExecuteCommand(); + } + catch (AerospikeException ae) + { + if (ae.InDoubt) + { + SetInDoubt(); + } + status.SetException(ae); + + if (!batchPolicy.respondAllKeys) + { + throw; + } + } + catch (Exception e) + { + if (!command.splitRetry) + { + SetInDoubt(); + } + status.SetException(e); + + if (!batchPolicy.respondAllKeys) + { + throw; + } + } + } + return true; + } + + protected internal void SetInDoubt() + { + // Set error/inDoubt for keys associated this batch command when + // the command was not retried and split. If a split retry occurred, + // those new subcommands have already set inDoubt on the affected + // subset of keys. + if (!splitRetry) + { + InDoubt(); + } + } + + protected internal virtual void InDoubt() + { + // Do nothing by default. Batch writes will override this method. + } + + protected internal abstract BatchCommand CreateCommand(BatchNode batchNode); + protected internal abstract List GenerateBatchNodes(); + } +} diff --git a/AerospikeClient/Command/BatchExecutor.cs b/AerospikeClient/Command/BatchExecutor.cs index 59c4cb5d..cd508292 100644 --- a/AerospikeClient/Command/BatchExecutor.cs +++ b/AerospikeClient/Command/BatchExecutor.cs @@ -1,167 +1,170 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -using System; -using System.Threading; - -namespace Aerospike.Client -{ - public sealed class BatchExecutor - { - public static void Execute(Cluster cluster, BatchPolicy policy, BatchCommand[] commands, BatchStatus status) - { - cluster.AddCommandCount(); - - if (policy.maxConcurrentThreads == 1 || commands.Length <= 1) - { - // Run batch requests sequentially in same thread. - foreach (BatchCommand command in commands) - { - try - { - command.Execute(); - } - catch (AerospikeException ae) - { - // Set error/inDoubt for keys associated this batch command when - // the command was not retried and split. If a split retry occurred, - // those new subcommands have already set error/inDoubt on the affected - // subset of keys. - if (!command.splitRetry) - { - command.SetInDoubt(ae.InDoubt); - } - status.SetException(ae); - - if (!policy.respondAllKeys) - { - throw; - } - } - catch (Exception e) - { - if (!command.splitRetry) - { - command.SetInDoubt(true); - } - status.SetException(e); - - if (!policy.respondAllKeys) - { - throw; - } - } - } - status.CheckException(); - return; - } - - // Run batch requests in parallel in separate threads. 
- BatchExecutor executor = new BatchExecutor(policy, commands, status); - executor.Execute(); - } - - public static void Execute(BatchCommand command, BatchStatus status) - { - command.Execute(); - status.CheckException(); - } - - private readonly BatchStatus status; - private readonly int maxConcurrentThreads; - private readonly BatchCommand[] commands; - private int completedCount; - private volatile int done; - private bool completed; - - private BatchExecutor(BatchPolicy policy, BatchCommand[] commands, BatchStatus status) - { - this.commands = commands; - this.status = status; - this.maxConcurrentThreads = (policy.maxConcurrentThreads == 0 || policy.maxConcurrentThreads >= commands.Length) ? commands.Length : policy.maxConcurrentThreads; - } - - internal void Execute() - { - // Start threads. - for (int i = 0; i < maxConcurrentThreads; i++) - { - BatchCommand cmd = commands[i]; - cmd.parent = this; - ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null); - } - - // Multiple threads write to the batch record array/list, so one might think that memory barriers - // are needed. That should not be necessary because of this synchronized waitTillComplete(). - WaitTillComplete(); - - // Throw an exception if an error occurred. - status.CheckException(); - } - - internal void OnComplete() - { - int finished = Interlocked.Increment(ref completedCount); - - if (finished < commands.Length) - { - int nextThread = finished + maxConcurrentThreads - 1; - - // Determine if a new thread needs to be started. - if (nextThread < commands.Length && done == 0) - { - // Start new thread. - BatchCommand cmd = commands[nextThread]; - cmd.parent = this; - ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null); - } - } - else - { - // Ensure executor succeeds or fails exactly once. 
- if (Interlocked.Exchange(ref done, 1) == 0) - { - NotifyCompleted(); - } - } - } - - internal bool IsDone() - { - return done != 0; - } - - private void WaitTillComplete() - { - lock (this) - { - while (!completed) - { - Monitor.Wait(this); - } - } - } - - private void NotifyCompleted() - { - lock (this) - { - completed = true; - Monitor.Pulse(this); - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using System; +using System.Threading; + +namespace Aerospike.Client +{ + public sealed class BatchExecutor + { + public static void Execute(Cluster cluster, BatchPolicy policy, BatchCommand[] commands, BatchStatus status) + { + cluster.AddCommandCount(); + + if (policy.maxConcurrentThreads == 1 || commands.Length <= 1) + { + // Run batch requests sequentially in same thread. + foreach (BatchCommand command in commands) + { + try + { + command.Execute(); + } + catch (AerospikeException ae) + { + // Set error/inDoubt for keys associated this batch command when + // the command was not retried and split. If a split retry occurred, + // those new subcommands have already set error/inDoubt on the affected + // subset of keys. 
+ if (!command.splitRetry) + { + if (ae.InDoubt) + { + command.SetInDoubt(); + } + } + status.SetException(ae); + + if (!policy.respondAllKeys) + { + throw; + } + } + catch (Exception e) + { + if (!command.splitRetry) + { + command.SetInDoubt(); + } + status.SetException(e); + + if (!policy.respondAllKeys) + { + throw; + } + } + } + status.CheckException(); + return; + } + + // Run batch requests in parallel in separate threads. + BatchExecutor executor = new BatchExecutor(policy, commands, status); + executor.Execute(); + } + + public static void Execute(BatchCommand command, BatchStatus status) + { + command.Execute(); + status.CheckException(); + } + + private readonly BatchStatus status; + private readonly int maxConcurrentThreads; + private readonly BatchCommand[] commands; + private int completedCount; + private volatile int done; + private bool completed; + + private BatchExecutor(BatchPolicy policy, BatchCommand[] commands, BatchStatus status) + { + this.commands = commands; + this.status = status; + this.maxConcurrentThreads = (policy.maxConcurrentThreads == 0 || policy.maxConcurrentThreads >= commands.Length) ? commands.Length : policy.maxConcurrentThreads; + } + + internal void Execute() + { + // Start threads. + for (int i = 0; i < maxConcurrentThreads; i++) + { + BatchCommand cmd = commands[i]; + cmd.parent = this; + ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null); + } + + // Multiple threads write to the batch record array/list, so one might think that memory barriers + // are needed. That should not be necessary because of this synchronized waitTillComplete(). + WaitTillComplete(); + + // Throw an exception if an error occurred. + status.CheckException(); + } + + internal void OnComplete() + { + int finished = Interlocked.Increment(ref completedCount); + + if (finished < commands.Length) + { + int nextThread = finished + maxConcurrentThreads - 1; + + // Determine if a new thread needs to be started. 
+ if (nextThread < commands.Length && done == 0) + { + // Start new thread. + BatchCommand cmd = commands[nextThread]; + cmd.parent = this; + ThreadPool.UnsafeQueueUserWorkItem(cmd.Run, null); + } + } + else + { + // Ensure executor succeeds or fails exactly once. + if (Interlocked.Exchange(ref done, 1) == 0) + { + NotifyCompleted(); + } + } + } + + internal bool IsDone() + { + return done != 0; + } + + private void WaitTillComplete() + { + lock (this) + { + while (!completed) + { + Monitor.Wait(this); + } + } + } + + private void NotifyCompleted() + { + lock (this) + { + completed = true; + Monitor.Pulse(this); + } + } + } +} diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs index f5ca7ac6..aa271065 100644 --- a/AerospikeClient/Command/Command.cs +++ b/AerospikeClient/Command/Command.cs @@ -1,3215 +1,3296 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -using System.Collections; - -#pragma warning disable 0618 - -namespace Aerospike.Client -{ - public abstract class Command - { - public static readonly int INFO1_READ = (1 << 0); // Contains a read operation. - public static readonly int INFO1_GET_ALL = (1 << 1); // Get all bins. - public static readonly int INFO1_SHORT_QUERY = (1 << 2); // Short query. 
- public static readonly int INFO1_BATCH = (1 << 3); // Batch read or exists. - public static readonly int INFO1_NOBINDATA = (1 << 5); // Do not read the bins. - public static readonly int INFO1_READ_MODE_AP_ALL = (1 << 6); // Involve all replicas in read operation. - public static readonly int INFO1_COMPRESS_RESPONSE = (1 << 7); // Tell server to compress it's response. - - public static readonly int INFO2_WRITE = (1 << 0); // Create or update record - public static readonly int INFO2_DELETE = (1 << 1); // Fling a record into the belly of Moloch. - public static readonly int INFO2_GENERATION = (1 << 2); // Update if expected generation == old. - public static readonly int INFO2_GENERATION_GT = (1 << 3); // Update if new generation >= old, good for restore. - public static readonly int INFO2_DURABLE_DELETE = (1 << 4); // Command resulting in record deletion leaves tombstone (Enterprise only). - public static readonly int INFO2_CREATE_ONLY = (1 << 5); // Create only. Fail if record already exists. - public static readonly int INFO2_RELAX_AP_LONG_QUERY = (1 << 6); // Treat as long query, but relac read consistency - public static readonly int INFO2_RESPOND_ALL_OPS = (1 << 7); // Return a result for every operation. - - public static readonly int INFO3_LAST = (1 << 0); // This is the last of a multi-part message. - public static readonly int INFO3_COMMIT_MASTER = (1 << 1); // Commit to master only before declaring success. - // On send: Do not return partition done in scan/query. - // On receive: Specified partition is done in scan/query. - public static readonly int INFO3_PARTITION_DONE = (1 << 2); - public static readonly int INFO3_UPDATE_ONLY = (1 << 3); // Update only. Merge bins. - public static readonly int INFO3_CREATE_OR_REPLACE = (1 << 4); // Create or completely replace record. - public static readonly int INFO3_REPLACE_ONLY = (1 << 5); // Completely replace existing record only. - public static readonly int INFO3_SC_READ_TYPE = (1 << 6); // See below. 
- public static readonly int INFO3_SC_READ_RELAX = (1 << 7); // See below. - - // Interpret SC_READ bits in info3. - // - // RELAX TYPE - // strict - // ------ - // 0 0 sequential (default) - // 0 1 linearize - // - // relaxed - // ------- - // 1 0 allow replica - // 1 1 allow unavailable - - public const int INFO4_MRT_VERIFY_READ = (1 << 0); // Send MRT version to the server to be verified. - public const int INFO4_MRT_ROLL_FORWARD = (1 << 1); // Roll forward MRT. - public const int INFO4_MRT_ROLL_BACK = (1 << 2); // Roll back MRT. - - public const byte STATE_READ_AUTH_HEADER = 1; - public const byte STATE_READ_HEADER = 2; - public const byte STATE_READ_DETAIL = 3; - public const byte STATE_COMPLETE = 4; - - public const byte BATCH_MSG_READ = 0x0; - public const byte BATCH_MSG_REPEAT = 0x1; - public const byte BATCH_MSG_INFO = 0x2; - public const byte BATCH_MSG_GEN = 0x4; - public const byte BATCH_MSG_TTL = 0x8; - public const byte BATCH_MSG_INFO4 = 0x10; - - public const int MSG_TOTAL_HEADER_SIZE = 30; - public const int FIELD_HEADER_SIZE = 5; - public const int OPERATION_HEADER_SIZE = 8; - public const int MSG_REMAINING_HEADER_SIZE = 22; - public const int COMPRESS_THRESHOLD = 128; - public const ulong CL_MSG_VERSION = 2UL; - public const ulong AS_MSG_TYPE = 3UL; - public const ulong MSG_TYPE_COMPRESSED = 4UL; - - internal byte[] dataBuffer; - internal int dataOffset; - internal readonly int maxRetries; - internal readonly int serverTimeout; - internal int socketTimeout; - internal int totalTimeout; - internal long? Version; - - public Command(int socketTimeout, int totalTimeout, int maxRetries) - { - this.maxRetries = maxRetries; - this.totalTimeout = totalTimeout; - - if (totalTimeout > 0) - { - this.socketTimeout = (socketTimeout < totalTimeout && socketTimeout > 0) ? 
socketTimeout : totalTimeout; - this.serverTimeout = this.socketTimeout; - } - else - { - this.socketTimeout = socketTimeout; - this.serverTimeout = 0; - } - } - - //-------------------------------------------------- - // Multi-record Transactions - //-------------------------------------------------- - - public void SetTxnAddKeys(WritePolicy policy, Key key, OperateArgs args) - { - Begin(); - int fieldCount = EstimateKeySize(key); - dataOffset += args.size; - WriteTxnMonitor(key, args.readAttr, args.writeAttr, fieldCount, args.operations.Length); - - foreach (Operation operation in args.operations) - { - WriteOperation(operation); - } - End(policy.compress); - } - - public void SetTxnVerify(Txn txn, Key key, long ver) - { - Begin(); - int fieldCount = EstimateKeySize(key); - - // Version field. - dataOffset += 7 + FIELD_HEADER_SIZE; - fieldCount++; - - //bool compress = SizeBuffer(policy); TODO - SizeBuffer(); - dataOffset += 8; - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; - dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE; - dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ; - dataBuffer[dataOffset++] = 0; - dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); - - WriteKey(key); - WriteFieldVersion(ver); - End(); - } - - public void SetBatchTxnVerify( - BatchPolicy policy, - Txn txn, - Key[] keys, - long[] versions, - BatchNode batch - ) - { - BatchOffsetsNative offsets = new(batch); - SetBatchTxnVerify(policy, txn, keys, versions, offsets); - } - - public void SetBatchTxnVerify( - BatchPolicy policy, - Txn txn, 
- Key[] keys, - long[] versions, - BatchOffsets offsets - ) - { - // Estimate buffer size. - Begin(); - - // Batch field - dataOffset += FIELD_HEADER_SIZE + 5; - - Key keyPrev = null; - long? verPrev = null; - int max = offsets.Size(); - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - long? ver = versions[offset]; - - dataOffset += key.digest.Length + 4; - - if (CanRepeat(key, keyPrev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Write full header and namespace/set/bin names. - dataOffset += 9; // header(4) + info4(1) + fieldCount(2) + opCount(2) = 9 - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - - if (ver.HasValue) - { - dataOffset += 7 + FIELD_HEADER_SIZE; - } - keyPrev = key; - verPrev = ver; - } - } - - bool compress = SizeBuffer(policy); - - WriteBatchHeader(policy, totalTimeout, 1); - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = GetBatchFlags(policy); - keyPrev = null; - verPrev = null; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - long? ver = versions[offset]; - - ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); - dataOffset += 4; - - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - if (CanRepeat(key, keyPrev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full message. 
- dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4); - dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE; - dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ; - - int fieldCount = 0; - - if (ver.HasValue) - { - fieldCount++; - } - - WriteBatchFields(key, fieldCount, 0); - - if (ver.HasValue) - { - WriteFieldVersion(ver.Value); - } - - keyPrev = key; - verPrev = ver; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); - End(compress); - } - - public void SetTxnMarkRollForward(Txn txn, Key key) - { - Bin bin = new("fwd", true); - - Begin(); - int fieldCount = EstimateKeySize(key); - EstimateOperationSize(bin); - WriteTxnMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1); - WriteOperation(bin, Operation.Type.WRITE); - End(); - } - - public void SetTxnRoll(Key key, Txn txn, int txnAttr) - { - Begin(); - int fieldCount = EstimateKeySize(key); - - fieldCount += SizeTxn(key, txn, false); - - SizeBuffer(); - dataOffset += 8; - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)(Command.INFO2_WRITE | Command.INFO2_DURABLE_DELETE); - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)txnAttr; - dataBuffer[dataOffset++] = 0; // clear the result code - dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); - - WriteKey(key); - WriteTxn(txn, false); - End(); - } - - public void SetBatchTxnRoll( - BatchPolicy policy, - Key[] keys, - BatchNode 
batch, - BatchAttr attr - ) - { - BatchOffsetsNative offsets = new(batch); - SetBatchTxnRoll(policy, keys, attr, offsets); - } - - public void SetBatchTxnRoll( - BatchPolicy policy, - Key[] keys, - BatchAttr attr, - BatchOffsets offsets - ) - { - // Estimate buffer size. - Begin(); - int fieldCount = 1; - int max = offsets.Size(); - Txn txn = policy.Txn; - long?[] versions = new long?[max]; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - versions[i] = txn.GetReadVersion(key); - } - - // Batch field - dataOffset += FIELD_HEADER_SIZE + 5; - - Key keyPrev = null; - long? verPrev = null; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - long? ver = versions[i]; - - dataOffset += key.digest.Length + 4; - - if (CanRepeat(key, keyPrev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Write full header and namespace/set/bin names. - dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - SizeTxnBatch(txn, ver); - dataOffset += 2; // gen(2) = 2 - keyPrev = key; - verPrev = ver; - } - } - - bool compress = SizeBuffer(policy); - - WriteBatchHeader(policy, totalTimeout, fieldCount); - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = GetBatchFlags(policy); - keyPrev = null; - verPrev = null; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - long? 
ver = versions[i]; - - ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); - dataOffset += 4; - - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - if (CanRepeat(key, keyPrev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full message. - WriteBatchWrite(key, txn, ver, attr, null, 0, 0); - keyPrev = key; - verPrev = ver; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); - End(compress); - } - - public void SetTxnClose(Txn txn, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(key); - WriteTxnMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE, - fieldCount, 0); - End(); - } - - private void WriteTxnMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) - { - SizeBuffer(); - dataOffset += 8; - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; - dataBuffer[dataOffset++] = (byte)readAttr; - dataBuffer[dataOffset++] = (byte)writeAttr; - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = 0; - dataBuffer[dataOffset++] = 0; - dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset); - - WriteKey(key); - } - - //-------------------------------------------------- - // Writes - //-------------------------------------------------- - - public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key key, Bin[] bins) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key, true); - - if 
(policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - foreach (Bin bin in bins) - { - EstimateOperationSize(bin); - } - - bool compress = SizeBuffer(policy); - - WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.Length); - WriteKey(policy, key, true); - - policy.filterExp?.Write(this); - - foreach (Bin bin in bins) - { - WriteOperation(bin, operation); - } - End(compress); - } - - public virtual void SetDelete(WritePolicy policy, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key, true); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - SizeBuffer(); - WriteHeaderWrite(policy, Command.INFO2_WRITE | Command.INFO2_DELETE, fieldCount, 0); - WriteKey(policy, key, true); - - policy.filterExp?.Write(this); - End(); - } - - public void SetDelete(Policy policy, Key key, BatchAttr attr) - { - Begin(); - Expression exp = GetBatchExpression(policy, attr); - int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); - SizeBuffer(); - WriteKeyAttr(policy, key, attr, exp, fieldCount, 0); - End(); - } - - public virtual void SetTouch(WritePolicy policy, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key, true); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - EstimateOperationSize(); - SizeBuffer(); - WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 1); - WriteKey(policy, key, true); - - policy.filterExp?.Write(this); - WriteOperation(Operation.Type.TOUCH); - End(); - } - - //-------------------------------------------------- - // Reads - //-------------------------------------------------- - - public virtual void SetExists(Policy policy, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key, false); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - SizeBuffer(); - WriteHeaderReadHeader(policy, 
Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); - WriteKey(policy, key, false); - - policy.filterExp?.Write(this); - End(); - } - - public virtual void SetRead(Policy policy, Key key, string[] binNames) - { - int readAttr = Command.INFO1_READ; - int opCount = 0; - - if (binNames != null && binNames.Length > 0) - { - opCount = binNames.Length; - } - else - { - readAttr |= Command.INFO1_GET_ALL; - } - - Begin(); - int fieldCount = EstimateKeySize(policy, key, false); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - if (opCount != 0) - { - foreach (string binName in binNames) - { - EstimateOperationSize(binName); - } - } - - SizeBuffer(); - WriteHeaderRead(policy, serverTimeout, readAttr, 0, 0, fieldCount, opCount); - WriteKey(policy, key, false); - - policy.filterExp?.Write(this); - - if (opCount != 0) - { - foreach (string binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - End(); - } - - public void SetRead(Policy policy, BatchRead br) - { - Begin(); - - BatchReadPolicy rp = br.policy; - BatchAttr attr = new(); - Expression exp; - int opCount; - - if (rp != null) - { - attr.SetRead(rp); - exp = rp.filterExp ?? 
policy.filterExp; - } - else - { - attr.SetRead(policy); - exp = policy.filterExp; - } - - if (br.binNames != null) - { - opCount = br.binNames.Length; - - foreach (string binName in br.binNames) - { - EstimateOperationSize(binName); - } - } - else if (br.ops != null) - { - attr.AdjustRead(br.ops); - opCount = br.ops.Length; - - foreach (Operation op in br.ops) - { - if (Operation.IsWrite(op.type)) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read"); - } - EstimateOperationSize(op); - } - } - else - { - attr.AdjustRead(br.readAllBins); - opCount = 0; - } - - int fieldCount = EstimateKeyAttrSize(policy, br.key, attr, exp); - - SizeBuffer(); - WriteKeyAttr(policy, br.key, attr, exp, fieldCount, opCount); - - if (br.binNames != null) - { - foreach (string binName in br.binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - else if (br.ops != null) - { - foreach (Operation op in br.ops) - { - WriteOperation(op); - } - } - End(); - } - - public void SetRead(Policy policy, Key key, Operation[] ops) - { - Begin(); - - BatchAttr attr = new BatchAttr(); - attr.SetRead(policy); - attr.AdjustRead(ops); - - int fieldCount = EstimateKeyAttrSize(policy, key, attr, policy.filterExp); - - foreach (Operation op in ops) - { - if (Operation.IsWrite(op.type)) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read"); - } - EstimateOperationSize(op); - } - - SizeBuffer(); - WriteKeyAttr(policy, key, attr, policy.filterExp, fieldCount, ops.Length); - - foreach (Operation op in ops) - { - WriteOperation(op); - } - End(); - } - - public virtual void SetReadHeader(Policy policy, Key key) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key, false); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - SizeBuffer(); - WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); - 
WriteKey(policy, key, false); - - policy.filterExp?.Write(this); - End(); - } - - //-------------------------------------------------- - // Operate - //-------------------------------------------------- - - public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key, args.hasWrite); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - dataOffset += args.size; - - bool compress = SizeBuffer(policy); - - WriteHeaderReadWrite(policy, args, fieldCount); - WriteKey(policy, key, args.hasWrite); - - policy.filterExp?.Write(this); - - foreach (Operation operation in args.operations) - { - WriteOperation(operation); - } - End(compress); - } - - public void SetOperate(Policy policy, BatchAttr attr, Key key, Operation[] ops) - { - Begin(); - Expression exp = GetBatchExpression(policy, attr); - int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); - - dataOffset += attr.opSize; - bool compress = SizeBuffer(policy); - WriteKeyAttr(policy, key, attr, exp, fieldCount, ops.Length); - - foreach (Operation op in ops) - { - WriteOperation(op); - } - End(compress); - } - - - //-------------------------------------------------- - // UDF - //-------------------------------------------------- - - public virtual void SetUdf(WritePolicy policy, Key key, string packageName, string functionName, Value[] args) - { - Begin(); - int fieldCount = EstimateKeySize(policy, key, true); - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - byte[] argBytes = Packer.Pack(args); - fieldCount += EstimateUdfSize(packageName, functionName, argBytes); - - bool compress = SizeBuffer(policy); - - WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0); - WriteKey(policy, key, true); - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - WriteField(packageName, FieldType.UDF_PACKAGE_NAME); - 
WriteField(functionName, FieldType.UDF_FUNCTION); - WriteField(argBytes, FieldType.UDF_ARGLIST); - End(compress); - } - - public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, Value[] args) - { - byte[] argBytes = Packer.Pack(args); - SetUdf(policy, attr, key, packageName, functionName, argBytes); - } - - public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, byte[] argBytes) - { - Begin(); - Expression exp = GetBatchExpression(policy, attr); - int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); - fieldCount += EstimateUdfSize(packageName, functionName, argBytes); - - bool compress = SizeBuffer(policy); - WriteKeyAttr(policy, key, attr, exp, fieldCount, 0); - WriteField(packageName, FieldType.UDF_PACKAGE_NAME); - WriteField(functionName, FieldType.UDF_FUNCTION); - WriteField(argBytes, FieldType.UDF_ARGLIST); - End(compress); - } - - //-------------------------------------------------- - // Batch Read Only - //-------------------------------------------------- - - public virtual void SetBatchRead(BatchPolicy policy, List records, BatchNode batch) - { - // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; - BatchRead prev = null; - - Begin(); - int fieldCount = 1; - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - dataOffset += FIELD_HEADER_SIZE + 5; - - for (int i = 0; i < max; i++) - { - BatchRead record = records[offsets[i]]; - Key key = record.key; - string[] binNames = record.binNames; - Operation[] ops = record.ops; - - dataOffset += key.digest.Length + 4; - - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. 
- if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName && - prev.binNames == binNames && prev.readAllBins == record.readAllBins && - prev.ops == ops) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Estimate full header, namespace and bin names. - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - - if (binNames != null) - { - foreach (string binName in binNames) - { - EstimateOperationSize(binName); - } - } - else if (ops != null) - { - foreach (Operation op in ops) - { - EstimateReadOperationSize(op); - } - } - prev = record; - } - } - - bool compress = SizeBuffer(policy); - - int readAttr = Command.INFO1_READ; - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); - - policy.filterExp?.Write(this); - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; - prev = null; - - for (int i = 0; i < max; i++) - { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; - - BatchRead record = records[index]; - Key key = record.key; - string[] binNames = record.binNames; - Operation[] ops = record.ops; - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. 
The batch will still be correct. - if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName && - prev.binNames == binNames && prev.readAllBins == record.readAllBins && - prev.ops == ops) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full header, namespace and bin names. - dataBuffer[dataOffset++] = BATCH_MSG_READ; - - if (binNames != null && binNames.Length != 0) - { - dataBuffer[dataOffset++] = (byte)readAttr; - WriteBatchFields(key, 0, binNames.Length); - - foreach (string binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - else if (ops != null) - { - int offset = dataOffset++; - WriteBatchFields(key, 0, ops.Length); - dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr); - } - else - { - dataBuffer[dataOffset++] = (byte)(readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA)); - WriteBatchFields(key, 0, 0); - } - prev = record; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); - } - - public virtual void SetBatchRead - ( - BatchPolicy policy, - Key[] keys, - BatchNode batch, - string[] binNames, - Operation[] ops, - int readAttr - ) - { - // Estimate full row size - int[] offsets = batch.offsets; - int max = batch.offsetsSize; - - // Estimate dataBuffer size. - Begin(); - int fieldCount = 1; - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - dataOffset += FIELD_HEADER_SIZE + 5; - - Key prev = null; - - for (int i = 0; i < max; i++) - { - Key key = keys[offsets[i]]; - - dataOffset += key.digest.Length + 4; - - // Try reference equality in hope that namespace for all keys is set from a fixed variable. 
- if (prev != null && prev.ns == key.ns && prev.setName == key.setName) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Estimate full header, namespace and bin names. - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - - if (binNames != null) - { - foreach (String binName in binNames) - { - EstimateOperationSize(binName); - } - } - else if (ops != null) - { - foreach (Operation op in ops) - { - EstimateReadOperationSize(op); - } - } - prev = key; - } - } - - bool compress = SizeBuffer(policy); - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); - - policy.filterExp?.Write(this); - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; - prev = null; - - for (int i = 0; i < max; i++) - { - int index = offsets[i]; - ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); - dataOffset += 4; - - Key key = keys[index]; - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - // Try reference equality in hope that namespace for all keys is set from a fixed variable. - if (prev != null && prev.ns == key.ns && prev.setName == key.setName) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full header, namespace and bin names. 
- dataBuffer[dataOffset++] = BATCH_MSG_READ; - - if (binNames != null && binNames.Length != 0) - { - dataBuffer[dataOffset++] = (byte)readAttr; - WriteBatchFields(key, 0, binNames.Length); - - foreach (String binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - else if (ops != null) - { - int offset = dataOffset++; - WriteBatchFields(key, 0, ops.Length); - dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr); - } - else - { - dataBuffer[dataOffset++] = (byte)readAttr; - WriteBatchFields(key, 0, 0); - } - prev = key; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); - } - - //-------------------------------------------------- - // Batch Read/Write Operations - //-------------------------------------------------- - - public virtual void SetBatchOperate( - BatchPolicy policy, - IList records, - BatchNode batch) - { - BatchOffsetsNative offsets = new BatchOffsetsNative(batch); - SetBatchOperate(policy, records, offsets); - } - - public void SetBatchOperate( - BatchPolicy policy, - IList records, - BatchOffsets offsets) - { - Begin(); - int max = offsets.Size(); - Txn txn = policy.Txn; - long?[] versions = null; - - if (txn != null) - { - versions = new long?[max]; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - BatchRecord record = (BatchRecord)records[offset]; - versions[i] = txn.GetReadVersion(record.key); - } - } - - int fieldCount = 1; - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - dataOffset += FIELD_HEADER_SIZE + 5; - - BatchRecord prev = null; - long? verPrev = null; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - BatchRecord record = (BatchRecord)records[offset]; - Key key = record.key; - long? 
ver = versions?[i]; - - dataOffset += key.digest.Length + 4; - - if (CanRepeat(policy, key, record, prev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Estimate full header, namespace and bin names. - dataOffset += 12; - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - SizeTxnBatch(txn, ver); - dataOffset += record.Size(policy); - prev = record; - verPrev = ver; - } - } - bool compress = SizeBuffer(policy); - - WriteBatchHeader(policy, totalTimeout, fieldCount); - - policy.filterExp?.Write(this); - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = GetBatchFlags(policy); - - BatchAttr attr = new(); - prev = null; - verPrev = null; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - BatchRecord record = (BatchRecord)records[offset]; - long? ver = versions?[i]; - ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); - dataOffset += 4; - - Key key = record.key; - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - - if (CanRepeat(policy, key, record, prev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full message. 
- switch (record.GetBatchType()) - { - case BatchRecord.Type.BATCH_READ: - { - BatchRead br = (BatchRead)record; - - if (br.policy != null) - { - attr.SetRead(br.policy); - } - else - { - attr.SetRead(policy); - } - - if (br.binNames != null) - { - if (br.binNames.Length > 0) - { - WriteBatchBinNames(key, txn, ver, br.binNames, attr, attr.filterExp); - } - else - { - attr.AdjustRead(true); - WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0); - } - } - else if (br.ops != null) - { - attr.AdjustRead(br.ops); - WriteBatchOperations(key, txn, ver, br.ops, attr, attr.filterExp); - } - else - { - attr.AdjustRead(br.readAllBins); - WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0); - } - break; - } - - case BatchRecord.Type.BATCH_WRITE: - { - BatchWrite bw = (BatchWrite)record; - - if (bw.policy != null) - { - attr.SetWrite(bw.policy); - } - else - { - attr.SetWrite(policy); - } - attr.AdjustWrite(bw.ops); - WriteBatchOperations(key, txn, ver, bw.ops, attr, attr.filterExp); - break; - } - - case BatchRecord.Type.BATCH_UDF: - { - BatchUDF bu = (BatchUDF)record; - - if (bu.policy != null) - { - attr.SetUDF(bu.policy); - } - else - { - attr.SetUDF(policy); - } - WriteBatchWrite(key, policy.Txn, null, attr, attr.filterExp, 3, 0); - WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME); - WriteField(bu.functionName, FieldType.UDF_FUNCTION); - WriteField(bu.argBytes, FieldType.UDF_ARGLIST); - break; - } - - case BatchRecord.Type.BATCH_DELETE: - { - BatchDelete bd = (BatchDelete)record; - - if (bd.policy != null) - { - attr.SetDelete(bd.policy); - } - else - { - attr.SetDelete(policy); - } - WriteBatchWrite(key, txn, ver, attr, attr.filterExp, 0, 0); - break; - } - } - prev = record; - verPrev = ver; - } - } - - // Write real field size. 
- ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); - End(compress); - } - - public virtual void SetBatchOperate - ( - BatchPolicy policy, - Key[] keys, - BatchNode batch, - string[] binNames, - Operation[] ops, - BatchAttr attr - ) - { - BatchOffsetsNative offsets = new BatchOffsetsNative(batch); - SetBatchOperate(policy, keys, binNames, ops, attr, offsets); - } - - public void SetBatchOperate( - BatchPolicy policy, - Key[] keys, - string[] binNames, - Operation[] ops, - BatchAttr attr, - BatchOffsets offsets - ) - { - // Estimate full row size - int max = offsets.Size(); - Txn txn = policy.Txn; - long?[] versions = null; - - Begin(); - - if (txn != null) - { - versions = new long?[max]; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - versions[i] = txn.GetReadVersion(key); - } - } - - Expression exp = GetBatchExpression(policy, attr); - int fieldCount = 1; - - if (exp != null) - { - dataOffset += exp.Size(); - fieldCount++; - } - - dataOffset += FIELD_HEADER_SIZE + 5; - - Key keyPrev = null; - long? verPrev = null; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - long? ver = versions?[i]; - - dataOffset += key.digest.Length + 4; - - if (CanRepeat(attr, key, keyPrev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Write full header and namespace/set/bin names. 
- dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - SizeTxnBatch(txn, ver); - - if (attr.sendKey) - { - dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; - } - - if (binNames != null) - { - foreach (string binName in binNames) - { - EstimateOperationSize(binName); - } - } - else if (ops != null) - { - foreach (Operation op in ops) - { - if (Operation.IsWrite(op.type)) - { - if (!attr.hasWrite) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read"); - } - dataOffset += 2; // Extra write specific fields. - } - EstimateOperationSize(op); - } - } - else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) - { - dataOffset += 2; // Extra write specific fields. - } - keyPrev = key; - verPrev = ver; - } - } - - bool compress = SizeBuffer(policy); - - WriteBatchHeader(policy, totalTimeout, fieldCount); - - exp?.Write(this); - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = GetBatchFlags(policy); - keyPrev = null; - verPrev = null; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - long? ver = versions?[i]; - - ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); - dataOffset += 4; - - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - if (CanRepeat(attr, key, keyPrev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full message. 
- if (binNames != null) - { - WriteBatchBinNames(key, txn, ver, binNames, attr, null); - } - else if (ops != null) - { - WriteBatchOperations(key, txn, ver, ops, attr, null); - } - else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) - { - WriteBatchWrite(key, txn, ver, attr, null, 0, 0); - } - else - { - WriteBatchRead(key, txn, ver, attr, null, 0); - } - keyPrev = key; - verPrev = ver; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); - End(compress); - } - - public virtual void SetBatchUDF( - BatchPolicy policy, - Key[] keys, - BatchNode batch, - string packageName, - string functionName, - byte[] argBytes, - BatchAttr attr - ) - { - BatchOffsetsNative offsets = new BatchOffsetsNative(batch); - SetBatchUDF(policy, keys, packageName, functionName, argBytes, attr, offsets); - } - - public virtual void SetBatchUDF - ( - BatchPolicy policy, - Key[] keys, - string packageName, - string functionName, - byte[] argBytes, - BatchAttr attr, - BatchOffsets offsets - ) - { - // Estimate buffer size. - Begin(); - int max = offsets.Size(); - Txn txn = policy.Txn; - long?[] versions = null; - - if (txn != null) - { - versions = new long?[max]; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - versions[i] = txn.GetReadVersion(key); - } - } - - Expression exp = GetBatchExpression(policy, attr); - int fieldCount = 1; - - if (exp != null) - { - dataOffset += exp.Size(); - fieldCount++; - } - - dataOffset += FIELD_HEADER_SIZE + 5; - - Key keyPrev = null; - long? verPrev = null; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - long? ver = versions?[i]; - - dataOffset += key.digest.Length + 4; - - if (CanRepeat(attr, key, keyPrev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataOffset++; - } - else - { - // Write full header and namespace/set/bin names. 
- dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - SizeTxnBatch(txn, ver); - - if (attr.sendKey) - { - dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; - } - dataOffset += 2; // gen(2) = 2 - EstimateUdfSize(packageName, functionName, argBytes); - keyPrev = key; - verPrev = ver; - } - } - - bool compress = SizeBuffer(policy); - - WriteBatchHeader(policy, totalTimeout, fieldCount); - - exp?.Write(this); - - int fieldSizeOffset = dataOffset; - WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - - ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = GetBatchFlags(policy); - keyPrev = null; - verPrev = null; - - for (int i = 0; i < max; i++) - { - int offset = offsets.Get(i); - Key key = keys[offset]; - long? ver = versions?[i]; - - ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); - dataOffset += 4; - - byte[] digest = key.digest; - Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); - dataOffset += digest.Length; - - if (CanRepeat(attr, key, keyPrev, ver, verPrev)) - { - // Can set repeat previous namespace/bin names to save space. - dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; - } - else - { - // Write full message. - WriteBatchWrite(key, txn, ver, attr, null, 3, 0); - WriteField(packageName, FieldType.UDF_PACKAGE_NAME); - WriteField(functionName, FieldType.UDF_FUNCTION); - WriteField(argBytes, FieldType.UDF_ARGLIST); - keyPrev = key; - verPrev = ver; - } - } - - // Write real field size. - ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); - End(compress); - } - - private static bool CanRepeat( - Policy policy, - Key key, - BatchRecord record, - BatchRecord prev, - long? ver, - long? 
verPrev - ) - { - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. - // Same goes for ver reference equality check. - return !policy.sendKey && verPrev == ver && prev != null && prev.key.ns == key.ns && - prev.key.setName == key.setName && record.Equals(prev); - } - - private static bool CanRepeat(BatchAttr attr, Key key, Key keyPrev, long? ver, long? verPrev) - { - return !attr.sendKey && verPrev == ver && keyPrev != null && keyPrev.ns == key.ns && - keyPrev.setName == key.setName; - } - - private static bool CanRepeat(Key key, Key keyPrev, long? ver, long? verPrev) - { - return verPrev == ver && keyPrev != null && keyPrev.ns == key.ns && - keyPrev.setName == key.setName; - } - - private static Expression GetBatchExpression(Policy policy, BatchAttr attr) - { - return (attr.filterExp != null) ? attr.filterExp : policy.filterExp; - } - - private static byte GetBatchFlags(BatchPolicy policy) - { - byte flags = 0x8; - - if (policy.allowInline) - { - flags |= 0x1; - } - - if (policy.allowInlineSSD) - { - flags |= 0x2; - } - - if (policy.respondAllKeys) - { - flags |= 0x4; - } - return flags; - } - - private void SizeTxnBatch(Txn txn, long? ver) - { - if (txn != null) - { - dataOffset++; // Add info4 byte for MRT. - dataOffset += 8 + FIELD_HEADER_SIZE; - - if (ver.HasValue) - { - dataOffset += 7 + FIELD_HEADER_SIZE; - } - - if (txn.Deadline != 0) - { - dataOffset += 4 + FIELD_HEADER_SIZE; - } - } - } - - private void WriteBatchHeader(Policy policy, int timeout, int fieldCount) - { - int readAttr = Command.INFO1_BATCH; - - if (policy.compress) - { - readAttr |= Command.INFO1_COMPRESS_RESPONSE; - } - - // Write all header data except total size which must be written last. 
- dataOffset += 8; - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. - dataBuffer[dataOffset++] = (byte)readAttr; - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)0; - - Array.Clear(dataBuffer, dataOffset, 10); - dataOffset += 10; - dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); - } - - private void WriteBatchBinNames(Key key, Txn txn, long? ver, string[] binNames, BatchAttr attr, Expression filter) - { - WriteBatchRead(key, txn, ver, attr, filter, binNames.Length); - - foreach (string binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - - private void WriteBatchOperations(Key key, Txn txn, long? ver, Operation[] ops, BatchAttr attr, Expression filter) - { - if (attr.hasWrite) - { - WriteBatchWrite(key, txn, ver, attr, filter, 0, ops.Length); - } - else - { - WriteBatchRead(key, txn, ver, attr, filter, ops.Length); - } - - foreach (Operation op in ops) - { - WriteOperation(op); - } - } - - private void WriteBatchRead(Key key, Txn txn, long? 
ver, BatchAttr attr, Expression filter, int opCount) - { - if (txn != null) - { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - dataBuffer[dataOffset++] = (byte)attr.txnAttr; - ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); - dataOffset += 4; - WriteBatchFieldsTxn(key, txn, ver, attr, filter, 0, opCount); - } - else - { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); - dataOffset += 4; - WriteBatchFieldsReg(key, attr, filter, 0, opCount); - } - } - - private void WriteBatchWrite(Key key, Txn txn, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) - { - if (txn != null) - { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - dataBuffer[dataOffset++] = (byte)attr.txnAttr; - ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); - dataOffset += 2; - ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); - dataOffset += 4; - WriteBatchFieldsTxn(key, txn, ver, attr, filter, fieldCount, opCount); - } - else - { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); - dataOffset += 2; - 
ByteUtil.ShortToBytes((ushort)attr.expiration, dataBuffer, dataOffset); - dataOffset += 4; - WriteBatchFieldsReg(key, attr, filter, fieldCount, opCount); - } - } - - private void WriteBatchFieldsTxn(Key key, Txn txn, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) - { - if (txn != null) - { - fieldCount++; - - if (ver.HasValue) - { - fieldCount++; - } - - if (attr.hasWrite && txn.Deadline != 0) - { - fieldCount++; - } - } - - if (filter != null) - { - fieldCount++; - } - - if (attr.sendKey) - { - fieldCount++; - } - - WriteBatchFields(key, fieldCount, opCount); - - WriteFieldLE(txn.Id, FieldType.MRT_ID); - - if (ver.HasValue) - { - WriteFieldVersion(ver.Value); - } - - if (attr.hasWrite && txn.Deadline != 0) - { - WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE); - } - - filter?.Write(this); - - if (attr.sendKey) - { - WriteField(key.userKey, FieldType.KEY); - } - } - - private void WriteBatchFieldsReg( - Key key, - BatchAttr attr, - Expression filter, - int fieldCount, - int opCount - ) { - if (filter != null) { - fieldCount++; - } - - if (attr.sendKey) { - fieldCount++; - } - - WriteBatchFields(key, fieldCount, opCount); - - filter?.Write(this); - - if (attr.sendKey) { - WriteField(key.userKey, FieldType.KEY); - } - } - - private void WriteBatchFields(Key key, int fieldCount, int opCount) - { - fieldCount += 2; - ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += 2; - ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset); - dataOffset += 2; - WriteField(key.ns, FieldType.NAMESPACE); - WriteField(key.setName, FieldType.TABLE); - } - - //-------------------------------------------------- - // Scan - //-------------------------------------------------- - - public virtual void SetScan - ( - Cluster cluster, - ScanPolicy policy, - string ns, - string setName, - string[] binNames, - ulong taskId, - NodePartitions nodePartitions - ) - { - Begin(); - int fieldCount = 0; - int partsFullSize = 
nodePartitions.partsFull.Count * 2; - int partsPartialSize = nodePartitions.partsPartial.Count * 20; - long maxRecords = nodePartitions.recordMax; - - if (ns != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(ns) + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (setName != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(setName) + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (partsFullSize > 0) - { - dataOffset += partsFullSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (partsPartialSize > 0) - { - dataOffset += partsPartialSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (maxRecords > 0) - { - dataOffset += 8 + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (policy.recordsPerSecond > 0) - { - dataOffset += 4 + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - // Estimate scan timeout size. - dataOffset += 4 + FIELD_HEADER_SIZE; - fieldCount++; - - // Estimate taskId size. - dataOffset += 8 + FIELD_HEADER_SIZE; - fieldCount++; - - if (binNames != null) - { - foreach (string binName in binNames) - { - EstimateOperationSize(binName); - } - } - - SizeBuffer(); - int readAttr = Command.INFO1_READ; - - if (!policy.includeBinData) - { - readAttr |= Command.INFO1_NOBINDATA; - } - - // Clusters that support partition queries also support not sending partition done messages. - int operationCount = (binNames == null) ? 
0 : binNames.Length; - WriteHeaderRead(policy, totalTimeout, readAttr, 0, Command.INFO3_PARTITION_DONE, fieldCount, operationCount); - - if (ns != null) - { - WriteField(ns, FieldType.NAMESPACE); - } - - if (setName != null) - { - WriteField(setName, FieldType.TABLE); - } - - if (partsFullSize > 0) - { - WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsFull) - { - ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); - dataOffset += 2; - } - } - - if (partsPartialSize > 0) - { - WriteFieldHeader(partsPartialSize, FieldType.DIGEST_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsPartial) { - Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20); - dataOffset += 20; - } - } - - if (maxRecords > 0) - { - WriteField((ulong)maxRecords, FieldType.MAX_RECORDS); - } - - if (policy.recordsPerSecond > 0) - { - WriteField(policy.recordsPerSecond, FieldType.RECORDS_PER_SECOND); - } - - if (policy.filterExp != null) - { - policy.filterExp.Write(this); - } - - // Write scan timeout - WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); - - // Write taskId field - WriteField(taskId, FieldType.QUERY_ID); - - if (binNames != null) - { - foreach (string binName in binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - End(); - } - - //-------------------------------------------------- - // Query - //-------------------------------------------------- - - protected virtual internal void SetQuery - ( - Cluster cluster, - Policy policy, - Statement statement, - ulong taskId, - bool background, - NodePartitions nodePartitions - ) - { - byte[] functionArgBuffer = null; - int fieldCount = 0; - int filterSize = 0; - int binNameSize = 0; - bool isNew = cluster.hasPartitionQuery; - - Begin(); - - if (statement.ns != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(statement.ns) + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (statement.setName != null) - { - dataOffset += 
ByteUtil.EstimateSizeUtf8(statement.setName) + FIELD_HEADER_SIZE; - fieldCount++; - } - - // Estimate recordsPerSecond field size. This field is used in new servers and not used - // (but harmless to add) in old servers. - if (statement.recordsPerSecond > 0) - { - dataOffset += 4 + FIELD_HEADER_SIZE; - fieldCount++; - } - - // Estimate socket timeout field size. This field is used in new servers and not used - // (but harmless to add) in old servers. - dataOffset += 4 + FIELD_HEADER_SIZE; - fieldCount++; - - // Estimate taskId field. - dataOffset += 8 + FIELD_HEADER_SIZE; - fieldCount++; - - byte[] packedCtx = null; - - if (statement.filter != null) - { - IndexCollectionType type = statement.filter.CollectionType; - - // Estimate INDEX_TYPE field. - if (type != IndexCollectionType.DEFAULT) - { - dataOffset += FIELD_HEADER_SIZE + 1; - fieldCount++; - } - - // Estimate INDEX_RANGE field. - dataOffset += FIELD_HEADER_SIZE; - filterSize++; // num filters - filterSize += statement.filter.EstimateSize(); - dataOffset += filterSize; - fieldCount++; - - if (!isNew) - { - // Query bin names are specified as a field (Scan bin names are specified later as operations) - // in old servers. Estimate size for selected bin names. - if (statement.binNames != null && statement.binNames.Length > 0) - { - dataOffset += FIELD_HEADER_SIZE; - binNameSize++; // num bin names - - foreach (string binName in statement.binNames) - { - binNameSize += ByteUtil.EstimateSizeUtf8(binName) + 1; - } - dataOffset += binNameSize; - fieldCount++; - } - } - - packedCtx = statement.filter.PackedCtx; - - if (packedCtx != null) - { - dataOffset += FIELD_HEADER_SIZE + packedCtx.Length; - fieldCount++; - } - } - - // Estimate aggregation/background function size. 
- if (statement.functionName != null) - { - dataOffset += FIELD_HEADER_SIZE + 1; // udf type - dataOffset += ByteUtil.EstimateSizeUtf8(statement.packageName) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(statement.functionName) + FIELD_HEADER_SIZE; - - if (statement.functionArgs.Length > 0) - { - functionArgBuffer = Packer.Pack(statement.functionArgs); - } - else - { - functionArgBuffer = new byte[0]; - } - dataOffset += FIELD_HEADER_SIZE + functionArgBuffer.Length; - fieldCount += 4; - } - - if (policy.filterExp != null) - { - dataOffset += policy.filterExp.Size(); - fieldCount++; - } - - long maxRecords = 0; - int partsFullSize = 0; - int partsPartialDigestSize = 0; - int partsPartialBValSize = 0; - - if (nodePartitions != null) - { - partsFullSize = nodePartitions.partsFull.Count * 2; - partsPartialDigestSize = nodePartitions.partsPartial.Count * 20; - - if (statement.filter != null) - { - partsPartialBValSize = nodePartitions.partsPartial.Count * 8; - } - maxRecords = nodePartitions.recordMax; - } - - if (partsFullSize > 0) - { - dataOffset += partsFullSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (partsPartialDigestSize > 0) - { - dataOffset += partsPartialDigestSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (partsPartialBValSize > 0) - { - dataOffset += partsPartialBValSize + FIELD_HEADER_SIZE; - fieldCount++; - } - - // Estimate max records field size. This field is used in new servers and not used - // (but harmless to add) in old servers. - if (maxRecords > 0) - { - dataOffset += 8 + FIELD_HEADER_SIZE; - fieldCount++; - } - - // Operations (used in query execute) and bin names (used in scan/query) are mutually exclusive. - int operationCount = 0; - - if (statement.operations != null) - { - // Estimate size for background operations. 
- if (!background) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Operations not allowed in foreground query"); - } - - foreach (Operation operation in statement.operations) - { - if (!Operation.IsWrite(operation.type)) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Read operations not allowed in background query"); - } - EstimateOperationSize(operation); - } - operationCount = statement.operations.Length; - } - else if (statement.binNames != null && (isNew || statement.filter == null)) - { - // Estimate size for selected bin names (query bin names already handled for old servers). - foreach (string binName in statement.binNames) - { - EstimateOperationSize(binName); - } - operationCount = statement.binNames.Length; - } - - SizeBuffer(); - - if (background) - { - WriteHeaderWrite((WritePolicy)policy, Command.INFO2_WRITE, fieldCount, operationCount); - } - else - { - QueryPolicy qp = (QueryPolicy)policy; - int readAttr = Command.INFO1_READ; - int writeAttr = 0; - - if (!qp.includeBinData) - { - readAttr |= Command.INFO1_NOBINDATA; - } - - if (qp.shortQuery || qp.expectedDuration == QueryDuration.SHORT) - { - readAttr |= Command.INFO1_SHORT_QUERY; - } - else if (qp.expectedDuration == QueryDuration.LONG_RELAX_AP) - { - writeAttr |= Command.INFO2_RELAX_AP_LONG_QUERY; - } - - int infoAttr = (isNew || statement.filter == null) ? Command.INFO3_PARTITION_DONE : 0; - - WriteHeaderRead(policy, totalTimeout, readAttr, writeAttr, infoAttr, fieldCount, operationCount); - } - - if (statement.ns != null) - { - WriteField(statement.ns, FieldType.NAMESPACE); - } - - if (statement.setName != null) - { - WriteField(statement.setName, FieldType.TABLE); - } - - // Write records per second. - if (statement.recordsPerSecond > 0) - { - WriteField(statement.recordsPerSecond, FieldType.RECORDS_PER_SECOND); - } - - // Write socket idle timeout. 
- WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); - - // Write taskId field - WriteField(taskId, FieldType.QUERY_ID); - - if (statement.filter != null) - { - IndexCollectionType type = statement.filter.CollectionType; - - if (type != IndexCollectionType.DEFAULT) - { - WriteFieldHeader(1, FieldType.INDEX_TYPE); - dataBuffer[dataOffset++] = (byte)type; - } - - WriteFieldHeader(filterSize, FieldType.INDEX_RANGE); - dataBuffer[dataOffset++] = (byte)1; - dataOffset = statement.filter.Write(dataBuffer, dataOffset); - - if (!isNew) - { - // Query bin names are specified as a field (Scan bin names are specified later as operations) - // in old servers. - if (statement.binNames != null && statement.binNames.Length > 0) - { - WriteFieldHeader(binNameSize, FieldType.QUERY_BINLIST); - dataBuffer[dataOffset++] = (byte)statement.binNames.Length; - - foreach (string binName in statement.binNames) - { - int len = ByteUtil.StringToUtf8(binName, dataBuffer, dataOffset + 1); - dataBuffer[dataOffset] = (byte)len; - dataOffset += len + 1; - } - } - } - - if (packedCtx != null) - { - WriteFieldHeader(packedCtx.Length, FieldType.INDEX_CONTEXT); - Array.Copy(packedCtx, 0, dataBuffer, dataOffset, packedCtx.Length); - dataOffset += packedCtx.Length; - } - } - - if (statement.functionName != null) - { - WriteFieldHeader(1, FieldType.UDF_OP); - dataBuffer[dataOffset++] = background ? 
(byte)2 : (byte)1; - WriteField(statement.packageName, FieldType.UDF_PACKAGE_NAME); - WriteField(statement.functionName, FieldType.UDF_FUNCTION); - WriteField(functionArgBuffer, FieldType.UDF_ARGLIST); - } - - policy.filterExp?.Write(this); - - if (partsFullSize > 0) - { - WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsFull) - { - ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); - dataOffset += 2; - } - } - - if (partsPartialDigestSize > 0) - { - WriteFieldHeader(partsPartialDigestSize, FieldType.DIGEST_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsPartial) - { - Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20); - dataOffset += 20; - } - } - - if (partsPartialBValSize > 0) - { - WriteFieldHeader(partsPartialBValSize, FieldType.BVAL_ARRAY); - - foreach (PartitionStatus part in nodePartitions.partsPartial) - { - ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset); - dataOffset += 8; - } - } - - if (maxRecords > 0) - { - WriteField((ulong)maxRecords, FieldType.MAX_RECORDS); - } - - if (statement.operations != null) - { - foreach (Operation operation in statement.operations) - { - WriteOperation(operation); - } - } - else if (statement.binNames != null && (isNew || statement.filter == null)) - { - foreach (string binName in statement.binNames) - { - WriteOperation(binName, Operation.Type.READ); - } - } - End(); - } - - //-------------------------------------------------- - // Command Sizing - //-------------------------------------------------- - - private int EstimateKeyAttrSize(Policy policy, Key key, BatchAttr attr, Expression filterExp) - { - int fieldCount = EstimateKeySize(policy, key, attr.hasWrite); - - if (filterExp != null) - { - dataOffset += filterExp.Size(); - fieldCount++; - } - return fieldCount; - } - - private int EstimateKeySize(Policy policy, Key key, bool sendDeadline) - { - int fieldCount = EstimateKeySize(key); - - fieldCount += 
SizeTxn(key, policy.Txn, sendDeadline); - - if (policy.sendKey) - { - dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; - fieldCount++; - } - return fieldCount; - } - - private int EstimateKeySize(Key key) - { - int fieldCount = 0; - - if (key.ns != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (key.setName != null) - { - dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; - fieldCount++; - } - - dataOffset += key.digest.Length + FIELD_HEADER_SIZE; - fieldCount++; - - return fieldCount; - } - - private int EstimateUdfSize(string packageName, string functionName, byte[] bytes) - { - dataOffset += ByteUtil.EstimateSizeUtf8(packageName) + FIELD_HEADER_SIZE; - dataOffset += ByteUtil.EstimateSizeUtf8(functionName) + FIELD_HEADER_SIZE; - dataOffset += bytes.Length + FIELD_HEADER_SIZE; - return 3; - } - - private void EstimateOperationSize(Bin bin) - { - dataOffset += ByteUtil.EstimateSizeUtf8(bin.name) + OPERATION_HEADER_SIZE; - dataOffset += bin.value.EstimateSize(); - } - - private void EstimateOperationSize(Operation operation) - { - dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE; - dataOffset += operation.value.EstimateSize(); - } - - private void EstimateReadOperationSize(Operation operation) - { - if (Operation.IsWrite(operation.type)) - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read"); - } - dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE; - dataOffset += operation.value.EstimateSize(); - } - - private void EstimateOperationSize(string binName) - { - dataOffset += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE; - } - - private void EstimateOperationSize() - { - dataOffset += OPERATION_HEADER_SIZE; - } - - //-------------------------------------------------- - // Command Writes - 
//-------------------------------------------------- - - /// - /// Header write for write commands. - /// - private void WriteHeaderWrite(WritePolicy policy, int writeAttr, int fieldCount, int operationCount) - { - // Set flags. - int generation = 0; - int infoAttr = 0; - - switch (policy.recordExistsAction) - { - case RecordExistsAction.UPDATE: - break; - case RecordExistsAction.UPDATE_ONLY: - infoAttr |= Command.INFO3_UPDATE_ONLY; - break; - case RecordExistsAction.REPLACE: - infoAttr |= Command.INFO3_CREATE_OR_REPLACE; - break; - case RecordExistsAction.REPLACE_ONLY: - infoAttr |= Command.INFO3_REPLACE_ONLY; - break; - case RecordExistsAction.CREATE_ONLY: - writeAttr |= Command.INFO2_CREATE_ONLY; - break; - } - - switch (policy.generationPolicy) - { - case GenerationPolicy.NONE: - break; - case GenerationPolicy.EXPECT_GEN_EQUAL: - generation = policy.generation; - writeAttr |= Command.INFO2_GENERATION; - break; - case GenerationPolicy.EXPECT_GEN_GT: - generation = policy.generation; - writeAttr |= Command.INFO2_GENERATION_GT; - break; - } - - if (policy.commitLevel == CommitLevel.COMMIT_MASTER) - { - infoAttr |= Command.INFO3_COMMIT_MASTER; - } - - if (policy.durableDelete) - { - writeAttr |= Command.INFO2_DURABLE_DELETE; - } - - dataOffset += 8; - - // Write all header data except total size which must be written last. - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
- dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)writeAttr; - dataBuffer[dataOffset++] = (byte)infoAttr; - dataBuffer[dataOffset++] = 0; // unused - dataBuffer[dataOffset++] = 0; // clear the result code - dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); - } - - /// - /// Header write for operate command. - /// - private void WriteHeaderReadWrite - ( - WritePolicy policy, - OperateArgs args, - int fieldCount - ) - { - // Set flags. - int generation = 0; - int ttl = args.hasWrite ? policy.expiration : policy.readTouchTtlPercent; - int readAttr = args.readAttr; - int writeAttr = args.writeAttr; - int infoAttr = 0; - int operationCount = args.operations.Length; - - switch (policy.recordExistsAction) - { - case RecordExistsAction.UPDATE: - break; - case RecordExistsAction.UPDATE_ONLY: - infoAttr |= Command.INFO3_UPDATE_ONLY; - break; - case RecordExistsAction.REPLACE: - infoAttr |= Command.INFO3_CREATE_OR_REPLACE; - break; - case RecordExistsAction.REPLACE_ONLY: - infoAttr |= Command.INFO3_REPLACE_ONLY; - break; - case RecordExistsAction.CREATE_ONLY: - writeAttr |= Command.INFO2_CREATE_ONLY; - break; - } - - switch (policy.generationPolicy) - { - case GenerationPolicy.NONE: - break; - case GenerationPolicy.EXPECT_GEN_EQUAL: - generation = policy.generation; - writeAttr |= Command.INFO2_GENERATION; - break; - case GenerationPolicy.EXPECT_GEN_GT: - generation = policy.generation; - writeAttr |= Command.INFO2_GENERATION_GT; - break; - } - - if (policy.commitLevel == CommitLevel.COMMIT_MASTER) - { - infoAttr |= Command.INFO3_COMMIT_MASTER; - } - - if (policy.durableDelete) - { - writeAttr 
|= Command.INFO2_DURABLE_DELETE; - } - - switch (policy.readModeSC) - { - case ReadModeSC.SESSION: - break; - case ReadModeSC.LINEARIZE: - infoAttr |= Command.INFO3_SC_READ_TYPE; - break; - case ReadModeSC.ALLOW_REPLICA: - infoAttr |= Command.INFO3_SC_READ_RELAX; - break; - case ReadModeSC.ALLOW_UNAVAILABLE: - infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; - break; - } - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - if (policy.compress) - { - readAttr |= Command.INFO1_COMPRESS_RESPONSE; - } - - dataOffset += 8; - - // Write all header data except total size which must be written last. - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. - dataBuffer[dataOffset++] = (byte)readAttr; - dataBuffer[dataOffset++] = (byte)writeAttr; - dataBuffer[dataOffset++] = (byte)infoAttr; - dataBuffer[dataOffset++] = 0; // unused - dataBuffer[dataOffset++] = 0; // clear the result code - dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)ttl, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); - } - - /// - /// Header write for read commands. 
- /// - private void WriteHeaderRead - ( - Policy policy, - int timeout, - int readAttr, - int writeAttr, - int infoAttr, - int fieldCount, - int operationCount - ) - { - switch (policy.readModeSC) - { - case ReadModeSC.SESSION: - break; - case ReadModeSC.LINEARIZE: - infoAttr |= Command.INFO3_SC_READ_TYPE; - break; - case ReadModeSC.ALLOW_REPLICA: - infoAttr |= Command.INFO3_SC_READ_RELAX; - break; - case ReadModeSC.ALLOW_UNAVAILABLE: - infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; - break; - } - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - if (policy.compress) - { - readAttr |= Command.INFO1_COMPRESS_RESPONSE; - } - - dataOffset += 8; - - // Write all header data except total size which must be written last. - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. - dataBuffer[dataOffset++] = (byte)readAttr; - dataBuffer[dataOffset++] = (byte)writeAttr; - dataBuffer[dataOffset++] = (byte)infoAttr; - - for (int i = 0; i < 6; i++) - { - dataBuffer[dataOffset++] = 0; - } - dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); - } - - /// - /// Header write for read header commands. 
- /// - private void WriteHeaderReadHeader(Policy policy, int readAttr, int fieldCount, int operationCount) - { - int infoAttr = 0; - - switch (policy.readModeSC) - { - case ReadModeSC.SESSION: - break; - case ReadModeSC.LINEARIZE: - infoAttr |= Command.INFO3_SC_READ_TYPE; - break; - case ReadModeSC.ALLOW_REPLICA: - infoAttr |= Command.INFO3_SC_READ_RELAX; - break; - case ReadModeSC.ALLOW_UNAVAILABLE: - infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; - break; - } - - if (policy.readModeAP == ReadModeAP.ALL) - { - readAttr |= Command.INFO1_READ_MODE_AP_ALL; - } - - dataOffset += 8; - - // Write all header data except total size which must be written last. - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. - dataBuffer[dataOffset++] = (byte)readAttr; - dataBuffer[dataOffset++] = (byte)0; - dataBuffer[dataOffset++] = (byte)infoAttr; - - for (int i = 0; i < 6; i++) - { - dataBuffer[dataOffset++] = 0; - } - dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); - } - - /// - /// Header write for batch single commands. - /// - private void WriteKeyAttr( - Policy policy, - Key key, - BatchAttr attr, - Expression filterExp, - int fieldCount, - int operationCount - ) - { - dataOffset += 8; - - // Write all header data except total size which must be written last. - dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
- dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - dataBuffer[dataOffset++] = 0; // unused - dataBuffer[dataOffset++] = 0; // clear the result code - dataOffset += ByteUtil.IntToBytes((uint)attr.generation, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); - dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); - dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); - - WriteKey(policy, key, attr.hasWrite); - - filterExp?.Write(this); - } - - private void WriteKey(Policy policy, Key key, bool sendDeadline) - { - WriteKey(key); - WriteTxn(policy.Txn, sendDeadline); - - if (policy.sendKey) - { - WriteField(key.userKey, FieldType.KEY); - } - } - - private void WriteKey(Key key) - { - // Write key into dataBuffer. - if (key.ns != null) - { - WriteField(key.ns, FieldType.NAMESPACE); - } - - if (key.setName != null) - { - WriteField(key.setName, FieldType.TABLE); - } - - WriteField(key.digest, FieldType.DIGEST_RIPE); - } - - private int WriteReadOnlyOperations(Operation[] ops, int readAttr) - { - bool readBin = false; - bool readHeader = false; - - foreach (Operation op in ops) - { - switch (op.type) - { - case Operation.Type.READ: - // Read all bins if no bin is specified. 
- if (op.binName == null) - { - readAttr |= Command.INFO1_GET_ALL; - } - readBin = true; - break; - - case Operation.Type.READ_HEADER: - readHeader = true; - break; - - default: - break; - } - WriteOperation(op); - } - - if (readHeader && !readBin) - { - readAttr |= Command.INFO1_NOBINDATA; - } - return readAttr; - } - - private void WriteOperation(Bin bin, Operation.Type operationType) - { - int nameLength = ByteUtil.StringToUtf8(bin.name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); - int valueLength = bin.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); - - ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); - dataBuffer[dataOffset++] = (byte) bin.value.Type; - dataBuffer[dataOffset++] = (byte) 0; - dataBuffer[dataOffset++] = (byte) nameLength; - dataOffset += nameLength + valueLength; - } - - private void WriteOperation(Operation operation) - { - int nameLength = ByteUtil.StringToUtf8(operation.binName, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); - int valueLength = operation.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); - - ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = Operation.GetProtocolType(operation.type); - dataBuffer[dataOffset++] = (byte) operation.value.Type; - dataBuffer[dataOffset++] = (byte) 0; - dataBuffer[dataOffset++] = (byte) nameLength; - dataOffset += nameLength + valueLength; - } - - private void WriteOperation(string name, Operation.Type operationType) - { - int nameLength = ByteUtil.StringToUtf8(name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); - - ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); - dataBuffer[dataOffset++] = (byte) 0; - dataBuffer[dataOffset++] = 
(byte) 0; - dataBuffer[dataOffset++] = (byte) nameLength; - dataOffset += nameLength; - } - - private void WriteOperation(Operation.Type operationType) - { - ByteUtil.IntToBytes(4, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); - dataBuffer[dataOffset++] = 0; - dataBuffer[dataOffset++] = 0; - dataBuffer[dataOffset++] = 0; - } - - private int SizeTxn(Key key, Txn txn, bool sendDeadline) - { - int fieldCount = 0; - - if (txn != null) - { - dataOffset += 8 + FIELD_HEADER_SIZE; - fieldCount++; - - Version = txn.GetReadVersion(key); - - if (Version.HasValue) - { - dataOffset += 7 + FIELD_HEADER_SIZE; - fieldCount++; - } - - if (sendDeadline && txn.Deadline != 0) - { - dataOffset += 4 + FIELD_HEADER_SIZE; - fieldCount++; - } - } - return fieldCount; - } - - private void WriteTxn(Txn txn, bool sendDeadline) - { - if (txn != null) - { - WriteFieldLE(txn.Id, FieldType.MRT_ID); - - if (Version.HasValue) - { - WriteFieldVersion(Version.Value); - } - - if (sendDeadline && txn.Deadline != 0) - { - WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE); - } - } - } - - private void WriteFieldVersion(long ver) - { - WriteFieldHeader(7, FieldType.RECORD_VERSION); - ByteUtil.LongToVersionBytes(ver, dataBuffer, dataOffset); - dataOffset += 7; - } - - private void WriteField(Value value, int type) - { - int offset = dataOffset + FIELD_HEADER_SIZE; - dataBuffer[offset++] = (byte)value.Type; - int len = value.Write(dataBuffer, offset) + 1; - WriteFieldHeader(len, type); - dataOffset += len; - } - - private void WriteField(string str, int type) - { - int len = ByteUtil.StringToUtf8(str, dataBuffer, dataOffset + FIELD_HEADER_SIZE); - WriteFieldHeader(len, type); - dataOffset += len; - } - - private void WriteField(byte[] bytes, int type) - { - Array.Copy(bytes, 0, dataBuffer, dataOffset + FIELD_HEADER_SIZE, bytes.Length); - WriteFieldHeader(bytes.Length, type); - dataOffset += bytes.Length; - } - - private void 
WriteField(int val, int type) - { - WriteFieldHeader(4, type); - ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset); - dataOffset += 4; - } - - private void WriteFieldLE(int val, int type) - { - WriteFieldHeader(4, type); - ByteUtil.IntToLittleBytes((uint)val, dataBuffer, dataOffset); - dataOffset += 4; - } - - private void WriteField(ulong val, int type) - { - WriteFieldHeader(8, type); - ByteUtil.LongToBytes(val, dataBuffer, dataOffset); - dataOffset += 8; - } - - private void WriteFieldLE(long val, int type) - { - WriteFieldHeader(8, type); - ByteUtil.LongToLittleBytes((ulong)val, dataBuffer, dataOffset); - dataOffset += 8; - } - - private void WriteFieldHeader(int size, int type) - { - ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset); - dataOffset += 4; - dataBuffer[dataOffset++] = (byte)type; - } - - internal virtual void WriteExpHeader(int size) - { - WriteFieldHeader(size, FieldType.FILTER_EXP); - } - - private void Begin() - { - dataOffset = MSG_TOTAL_HEADER_SIZE; - } - - private bool SizeBuffer(Policy policy) - { - if (policy.compress && dataOffset > COMPRESS_THRESHOLD) - { - // Command will be compressed. First, write uncompressed command - // into separate dataBuffer. Save normal dataBuffer for compressed command. - // Normal dataBuffer in async mode is from dataBuffer pool that is used to - // minimize memory pinning during socket operations. - dataBuffer = new byte[dataOffset]; - dataOffset = 0; - return true; - } - else - { - // Command will be uncompressed. - SizeBuffer(); - return false; - } - } - - private void End(bool compress) - { - if (!compress) - { - End(); - return; - } - - // Write proto header. - ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48); - ByteUtil.LongToBytes(size, dataBuffer, 0); - - byte[] srcBuf = dataBuffer; - int srcSize = dataOffset; - - // Increase requested dataBuffer size in case compressed dataBuffer size is - // greater than the uncompressed dataBuffer size. 
- dataOffset += 16 + 100; - - // This method finds dataBuffer of requested size, resets dataOffset to segment offset - // and returns dataBuffer max size; - int trgBufSize = SizeBuffer(); - - // Compress to target starting at new dataOffset plus new header. - int trgSize = ByteUtil.Compress(srcBuf, srcSize, dataBuffer, dataOffset + 16, trgBufSize - 16) + 16; - - ulong proto = ((ulong)trgSize - 8) | (CL_MSG_VERSION << 56) | (MSG_TYPE_COMPRESSED << 48); - ByteUtil.LongToBytes(proto, dataBuffer, dataOffset); - ByteUtil.LongToBytes((ulong)srcSize, dataBuffer, dataOffset + 8); - SetLength(trgSize); - } - - protected internal abstract int SizeBuffer(); - protected internal abstract void End(); - protected internal abstract void SetLength(int length); - - //-------------------------------------------------- - // Response Parsing - //-------------------------------------------------- - - internal virtual void SkipKey(int fieldCount) - { - // There can be fields in the response (setname etc). - // But for now, ignore them. Expose them to the API if needed in the future. 
- for (int i = 0; i < fieldCount; i++) - { - int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4 + fieldlen; - } - } - - internal virtual Key ParseKey(int fieldCount, out ulong bval) - { - byte[] digest = null; - string ns = null; - string setName = null; - Value userKey = null; - bval = 0; - - for (int i = 0; i < fieldCount; i++) - { - int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - - int fieldtype = dataBuffer[dataOffset++]; - int size = fieldlen - 1; - - switch (fieldtype) - { - case FieldType.DIGEST_RIPE: - digest = new byte[size]; - Array.Copy(dataBuffer, dataOffset, digest, 0, size); - break; - - case FieldType.NAMESPACE: - ns = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size); - break; - - case FieldType.TABLE: - setName = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size); - break; - - case FieldType.KEY: - int type = dataBuffer[dataOffset++]; - size--; - userKey = ByteUtil.BytesToKeyValue((ParticleType)type, dataBuffer, dataOffset, size); - break; - - case FieldType.BVAL_ARRAY: - bval = (ulong)ByteUtil.LittleBytesToLong(dataBuffer, dataOffset); - break; - } - dataOffset += size; - } - return new Key(ns, digest, setName, userKey); - } - - public long? ParseVersion(int fieldCount) - { - long? 
version = null; - - for (int i = 0; i < fieldCount; i++) - { - int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - - int type = dataBuffer[dataOffset++]; - int size = len - 1; - - if (type == FieldType.RECORD_VERSION && size == 7) - { - version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); - } - dataOffset += size; - } - return version; - } - - public static bool BatchInDoubt(bool isWrite, int commandSentCounter) - { - return isWrite && commandSentCounter > 1; - } - - public interface BatchOffsets - { - int Size(); - int Get(int i); - } - - private class BatchOffsetsNative : BatchOffsets - { - private int size; - private int[] offsets; - - public BatchOffsetsNative(BatchNode batch) - { - this.size = batch.offsetsSize; - this.offsets = batch.offsets; - } - - public int Size() - { - return size; - } - - public int Get(int i) - { - return offsets[i]; - } - } - } -} -#pragma warning restore 0618 +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using System.Collections; + +#pragma warning disable 0618 + +namespace Aerospike.Client +{ + public abstract class Command + { + public static readonly int INFO1_READ = (1 << 0); // Contains a read operation. + public static readonly int INFO1_GET_ALL = (1 << 1); // Get all bins. 
+ public static readonly int INFO1_SHORT_QUERY = (1 << 2); // Short query. + public static readonly int INFO1_BATCH = (1 << 3); // Batch read or exists. + public static readonly int INFO1_NOBINDATA = (1 << 5); // Do not read the bins. + public static readonly int INFO1_READ_MODE_AP_ALL = (1 << 6); // Involve all replicas in read operation. + public static readonly int INFO1_COMPRESS_RESPONSE = (1 << 7); // Tell server to compress its response. + + public static readonly int INFO2_WRITE = (1 << 0); // Create or update record + public static readonly int INFO2_DELETE = (1 << 1); // Fling a record into the belly of Moloch. + public static readonly int INFO2_GENERATION = (1 << 2); // Update if expected generation == old. + public static readonly int INFO2_GENERATION_GT = (1 << 3); // Update if new generation >= old, good for restore. + public static readonly int INFO2_DURABLE_DELETE = (1 << 4); // Command resulting in record deletion leaves tombstone (Enterprise only). + public static readonly int INFO2_CREATE_ONLY = (1 << 5); // Create only. Fail if record already exists. + public static readonly int INFO2_RELAX_AP_LONG_QUERY = (1 << 6); // Treat as long query, but relax read consistency + public static readonly int INFO2_RESPOND_ALL_OPS = (1 << 7); // Return a result for every operation. + + public static readonly int INFO3_LAST = (1 << 0); // This is the last of a multi-part message. + public static readonly int INFO3_COMMIT_MASTER = (1 << 1); // Commit to master only before declaring success. + // On send: Do not return partition done in scan/query. + // On receive: Specified partition is done in scan/query. + public static readonly int INFO3_PARTITION_DONE = (1 << 2); + public static readonly int INFO3_UPDATE_ONLY = (1 << 3); // Update only. Merge bins. + public static readonly int INFO3_CREATE_OR_REPLACE = (1 << 4); // Create or completely replace record. + public static readonly int INFO3_REPLACE_ONLY = (1 << 5); // Completely replace existing record only.
+ public static readonly int INFO3_SC_READ_TYPE = (1 << 6); // See below. + public static readonly int INFO3_SC_READ_RELAX = (1 << 7); // See below. + + // Interpret SC_READ bits in info3. + // + // RELAX TYPE + // strict + // ------ + // 0 0 sequential (default) + // 0 1 linearize + // + // relaxed + // ------- + // 1 0 allow replica + // 1 1 allow unavailable + + public static readonly int INFO4_MRT_VERIFY_READ = (1 << 0); // Send MRT version to the server to be verified. + public static readonly int INFO4_MRT_ROLL_FORWARD = (1 << 1); // Roll forward MRT. + public static readonly int INFO4_MRT_ROLL_BACK = (1 << 2); // Roll back MRT. + + public const byte STATE_READ_AUTH_HEADER = 1; + public const byte STATE_READ_HEADER = 2; + public const byte STATE_READ_DETAIL = 3; + public const byte STATE_COMPLETE = 4; + + public const byte BATCH_MSG_READ = 0x0; + public const byte BATCH_MSG_REPEAT = 0x1; + public const byte BATCH_MSG_INFO = 0x2; + public const byte BATCH_MSG_GEN = 0x4; + public const byte BATCH_MSG_TTL = 0x8; + public const byte BATCH_MSG_INFO4 = 0x10; + + public const int MSG_TOTAL_HEADER_SIZE = 30; + public const int FIELD_HEADER_SIZE = 5; + public const int OPERATION_HEADER_SIZE = 8; + public const int MSG_REMAINING_HEADER_SIZE = 22; + public const int COMPRESS_THRESHOLD = 128; + public const ulong CL_MSG_VERSION = 2UL; + public const ulong AS_MSG_TYPE = 3UL; + public const ulong MSG_TYPE_COMPRESSED = 4UL; + + internal byte[] dataBuffer; + internal int dataOffset; + internal readonly int maxRetries; + internal readonly int serverTimeout; + internal int socketTimeout; + internal int totalTimeout; + internal long? 
Version; + + protected int resultCode; + protected int generation; + protected int expiration; + protected int fieldCount; + protected int opCount; + + public Command(int socketTimeout, int totalTimeout, int maxRetries) + { + this.maxRetries = maxRetries; + this.totalTimeout = totalTimeout; + + if (totalTimeout > 0) + { + this.socketTimeout = (socketTimeout < totalTimeout && socketTimeout > 0) ? socketTimeout : totalTimeout; + this.serverTimeout = this.socketTimeout; + } + else + { + this.socketTimeout = socketTimeout; + this.serverTimeout = 0; + } + + resultCode = 0; + generation = 0; + expiration = 0; + fieldCount = 0; + opCount = 0; + } + + //-------------------------------------------------- + // Multi-record Transactions + //-------------------------------------------------- + + public void SetTxnAddKeys(WritePolicy policy, Key key, OperateArgs args) + { + Begin(); + int fieldCount = EstimateKeySize(key); + dataOffset += args.size; + + bool compress = SizeBuffer(policy); + + WriteTxnMonitor(key, args.readAttr, args.writeAttr, fieldCount, args.operations.Length); + + foreach (Operation operation in args.operations) + { + WriteOperation(operation); + } + + End(compress); + } + + public void SetTxnVerify(Key key, long ver) + { + Begin(); + int fieldCount = EstimateKeySize(key); + + // Version field. 
+ dataOffset += 7 + FIELD_HEADER_SIZE; + fieldCount++; + + SizeBuffer(); + dataOffset += 8; + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE; + dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ; + dataBuffer[dataOffset++] = 0; + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); + + WriteKey(key); + WriteFieldVersion(ver); + End(); + } + + public void SetBatchTxnVerify( + BatchPolicy policy, + Key[] keys, + long?[] versions, + BatchNode batch + ) + { + BatchOffsetsNative offsets = new(batch); + SetBatchTxnVerify(policy, keys, versions, offsets); + } + + public void SetBatchTxnVerify( + BatchPolicy policy, + Key[] keys, + long?[] versions, + BatchOffsets offsets + ) + { + // Estimate buffer size. + Begin(); + + // Batch field + dataOffset += FIELD_HEADER_SIZE + 5; + + Key keyPrev = null; + long? verPrev = null; + int max = offsets.Size(); + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[offset]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Write full header and namespace/set/bin names. 
+ dataOffset += 9; // header(4) + info4(1) + fieldCount(2) + opCount(2) = 9 + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + + if (ver.HasValue) + { + dataOffset += 7 + FIELD_HEADER_SIZE; + } + keyPrev = key; + verPrev = ver; + } + } + + bool compress = SizeBuffer(policy); + + WriteBatchHeader(policy, totalTimeout, 1); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[offset]; + + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; + + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4); + dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE; + dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ; + + int fieldCount = 0; + + if (ver.HasValue) + { + fieldCount++; + } + + WriteBatchFields(key, fieldCount, 0); + + if (ver.HasValue) + { + WriteFieldVersion(ver.Value); + } + + keyPrev = key; + verPrev = ver; + } + } + + // Write real field size. 
+ ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(compress); + } + + public void SetTxnMarkRollForward(Key key) + { + Bin bin = new("fwd", true); + + Begin(); + int fieldCount = EstimateKeySize(key); + EstimateOperationSize(bin); + WriteTxnMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1); + WriteOperation(bin, Operation.Type.WRITE); + End(); + } + + public void SetTxnRoll(Key key, Txn txn, int txnAttr) + { + Begin(); + int fieldCount = EstimateKeySize(key); + + fieldCount += SizeTxn(key, txn, false); + + SizeBuffer(); + dataOffset += 8; + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)(Command.INFO2_WRITE | Command.INFO2_DURABLE_DELETE); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)txnAttr; + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); + + WriteKey(key); + WriteTxn(txn, false); + End(); + } + + public void SetBatchTxnRoll( + BatchPolicy policy, + Txn txn, + Key[] keys, + BatchNode batch, + BatchAttr attr + ) + { + BatchOffsetsNative offsets = new(batch); + SetBatchTxnRoll(policy, txn, keys, attr, offsets); + } + + public void SetBatchTxnRoll( + BatchPolicy policy, + Txn txn, + Key[] keys, + BatchAttr attr, + BatchOffsets offsets + ) + { + // Estimate buffer size. 
+ Begin(); + int fieldCount = 1; + int max = offsets.Size(); + long?[] versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + versions[i] = txn.GetReadVersion(key); + } + + // Batch field + dataOffset += FIELD_HEADER_SIZE + 5; + + Key keyPrev = null; + long? verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[i]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Write full header and namespace/set/bin names. + dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTxnBatch(txn, ver, attr.hasWrite); + dataOffset += 2; // gen(2) = 2 + keyPrev = key; + verPrev = ver; + } + } + + bool compress = SizeBuffer(policy); + + WriteBatchHeader(policy, totalTimeout, fieldCount); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions[i]; + + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; + + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. 
+ WriteBatchWrite(key, txn, ver, attr, null, 0, 0); + keyPrev = key; + verPrev = ver; + } + } + + // Write real field size. + ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(compress); + } + + public void SetTxnClose(Txn txn, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(key); + WriteTxnMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE, + fieldCount, 0); + End(); + } + + private void WriteTxnMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) + { + SizeBuffer(); + dataOffset += 8; + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)writeAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset); + + WriteKey(key); + } + + //-------------------------------------------------- + // Writes + //-------------------------------------------------- + + public virtual void SetWrite(WritePolicy policy, Operation.Type operation, Key key, Bin[] bins) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, true); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + foreach (Bin bin in bins) + { + EstimateOperationSize(bin); + } + + bool compress = SizeBuffer(policy); + + WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.Length); + WriteKey(policy, key, true); + + policy.filterExp?.Write(this); + + foreach (Bin bin in bins) + { + WriteOperation(bin, operation); + } + End(compress); + } + 
+ public virtual void SetDelete(WritePolicy policy, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, true); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + SizeBuffer(); + WriteHeaderWrite(policy, Command.INFO2_WRITE | Command.INFO2_DELETE, fieldCount, 0); + WriteKey(policy, key, true); + + policy.filterExp?.Write(this); + End(); + } + + public void SetDelete(Policy policy, Key key, BatchAttr attr) + { + Begin(); + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); + SizeBuffer(); + WriteKeyAttr(policy, key, attr, exp, fieldCount, 0); + End(); + } + + public virtual void SetTouch(WritePolicy policy, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, true); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + EstimateOperationSize(); + SizeBuffer(); + WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 1); + WriteKey(policy, key, true); + + policy.filterExp?.Write(this); + WriteOperation(Operation.Type.TOUCH); + End(); + } + + //-------------------------------------------------- + // Reads + //-------------------------------------------------- + + public virtual void SetExists(Policy policy, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, false); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + SizeBuffer(); + WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); + WriteKey(policy, key, false); + + policy.filterExp?.Write(this); + End(); + } + + public virtual void SetRead(Policy policy, Key key, string[] binNames) + { + int readAttr = Command.INFO1_READ; + int opCount = 0; + + if (binNames != null && binNames.Length > 0) + { + opCount = binNames.Length; + } + else + { + readAttr |= Command.INFO1_GET_ALL; + } + + Begin(); + int fieldCount = 
EstimateKeySize(policy, key, false); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + if (opCount != 0) + { + foreach (string binName in binNames) + { + EstimateOperationSize(binName); + } + } + + SizeBuffer(); + WriteHeaderRead(policy, serverTimeout, readAttr, 0, 0, fieldCount, opCount); + WriteKey(policy, key, false); + + policy.filterExp?.Write(this); + + if (opCount != 0) + { + foreach (string binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + End(); + } + + public void SetRead(Policy policy, BatchRead br) + { + Begin(); + + BatchReadPolicy rp = br.policy; + BatchAttr attr = new(); + Expression exp; + int opCount; + + if (rp != null) + { + attr.SetRead(rp); + exp = rp.filterExp ?? policy.filterExp; + } + else + { + attr.SetRead(policy); + exp = policy.filterExp; + } + + if (br.binNames != null) + { + opCount = br.binNames.Length; + + foreach (string binName in br.binNames) + { + EstimateOperationSize(binName); + } + } + else if (br.ops != null) + { + attr.AdjustRead(br.ops); + opCount = br.ops.Length; + + foreach (Operation op in br.ops) + { + if (Operation.IsWrite(op.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read"); + } + EstimateOperationSize(op); + } + } + else + { + attr.AdjustRead(br.readAllBins); + opCount = 0; + } + + int fieldCount = EstimateKeyAttrSize(policy, br.key, attr, exp); + + SizeBuffer(); + WriteKeyAttr(policy, br.key, attr, exp, fieldCount, opCount); + + if (br.binNames != null) + { + foreach (string binName in br.binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + else if (br.ops != null) + { + foreach (Operation op in br.ops) + { + WriteOperation(op); + } + } + End(); + } + + public void SetRead(Policy policy, Key key, Operation[] ops) + { + Begin(); + + BatchAttr attr = new(); + attr.SetRead(policy); + attr.AdjustRead(ops); + + int fieldCount = EstimateKeyAttrSize(policy, 
key, attr, policy.filterExp); + + foreach (Operation op in ops) + { + if (Operation.IsWrite(op.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in read"); + } + EstimateOperationSize(op); + } + + SizeBuffer(); + WriteKeyAttr(policy, key, attr, policy.filterExp, fieldCount, ops.Length); + + foreach (Operation op in ops) + { + WriteOperation(op); + } + End(); + } + + public virtual void SetReadHeader(Policy policy, Key key) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, false); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + SizeBuffer(); + WriteHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); + WriteKey(policy, key, false); + + policy.filterExp?.Write(this); + End(); + } + + //-------------------------------------------------- + // Operate + //-------------------------------------------------- + + public virtual void SetOperate(WritePolicy policy, Key key, OperateArgs args) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, args.hasWrite); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + dataOffset += args.size; + + bool compress = SizeBuffer(policy); + + WriteHeaderReadWrite(policy, args, fieldCount); + WriteKey(policy, key, args.hasWrite); + + policy.filterExp?.Write(this); + + foreach (Operation operation in args.operations) + { + WriteOperation(operation); + } + End(compress); + } + + public void SetOperate(Policy policy, BatchAttr attr, Key key, Operation[] ops) + { + Begin(); + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); + + dataOffset += attr.opSize; + bool compress = SizeBuffer(policy); + WriteKeyAttr(policy, key, attr, exp, fieldCount, ops.Length); + + foreach (Operation op in ops) + { + WriteOperation(op); + } + End(compress); + } + + + 
//-------------------------------------------------- + // UDF + //-------------------------------------------------- + + public virtual void SetUdf(WritePolicy policy, Key key, string packageName, string functionName, Value[] args) + { + Begin(); + int fieldCount = EstimateKeySize(policy, key, true); + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + byte[] argBytes = Packer.Pack(args); + fieldCount += EstimateUdfSize(packageName, functionName, argBytes); + + bool compress = SizeBuffer(policy); + + WriteHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0); + WriteKey(policy, key, true); + + policy.filterExp?.Write(this); + WriteField(packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(functionName, FieldType.UDF_FUNCTION); + WriteField(argBytes, FieldType.UDF_ARGLIST); + End(compress); + } + + public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, Value[] args) + { + byte[] argBytes = Packer.Pack(args); + SetUdf(policy, attr, key, packageName, functionName, argBytes); + } + + public void SetUdf(Policy policy, BatchAttr attr, Key key, string packageName, string functionName, byte[] argBytes) + { + Begin(); + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = EstimateKeyAttrSize(policy, key, attr, exp); + fieldCount += EstimateUdfSize(packageName, functionName, argBytes); + + bool compress = SizeBuffer(policy); + WriteKeyAttr(policy, key, attr, exp, fieldCount, 0); + WriteField(packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(functionName, FieldType.UDF_FUNCTION); + WriteField(argBytes, FieldType.UDF_ARGLIST); + End(compress); + } + + //-------------------------------------------------- + // Batch Read Only + //-------------------------------------------------- + + public virtual void SetBatchRead(BatchPolicy policy, List records, BatchNode batch) + { + // Estimate full row size + int[] offsets = batch.offsets; + int max = batch.offsetsSize; + 
BatchRead prev = null; + + Begin(); + int fieldCount = 1; + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + dataOffset += FIELD_HEADER_SIZE + 5; + + for (int i = 0; i < max; i++) + { + BatchRead record = records[offsets[i]]; + Key key = record.key; + string[] binNames = record.binNames; + Operation[] ops = record.ops; + + dataOffset += key.digest.Length + 4; + + // Avoid relatively expensive full equality checks for performance reasons. + // Use reference equality only in hope that common namespaces/bin names are set from + // fixed variables. It's fine if equality not determined correctly because it just + // results in more space used. The batch will still be correct. + if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName && + prev.binNames == binNames && prev.readAllBins == record.readAllBins && + prev.ops == ops) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Estimate full header, namespace and bin names. 
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + + if (binNames != null) + { + foreach (string binName in binNames) + { + EstimateOperationSize(binName); + } + } + else if (ops != null) + { + foreach (Operation op in ops) + { + EstimateReadOperationSize(op); + } + } + prev = record; + } + } + + bool compress = SizeBuffer(policy); + + int readAttr = Command.INFO1_READ; + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); + + policy.filterExp?.Write(this); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; + prev = null; + + for (int i = 0; i < max; i++) + { + int index = offsets[i]; + ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); + dataOffset += 4; + + BatchRead record = records[index]; + Key key = record.key; + string[] binNames = record.binNames; + Operation[] ops = record.ops; + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + // Avoid relatively expensive full equality checks for performance reasons. + // Use reference equality only in hope that common namespaces/bin names are set from + // fixed variables. It's fine if equality not determined correctly because it just + // results in more space used. The batch will still be correct. + if (prev != null && prev.key.ns == key.ns && prev.key.setName == key.setName && + prev.binNames == binNames && prev.readAllBins == record.readAllBins && + prev.ops == ops) + { + // Can set repeat previous namespace/bin names to save space. 
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full header, namespace and bin names. + dataBuffer[dataOffset++] = BATCH_MSG_READ; + + if (binNames != null && binNames.Length != 0) + { + dataBuffer[dataOffset++] = (byte)readAttr; + WriteBatchFields(key, 0, binNames.Length); + + foreach (string binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + else if (ops != null) + { + int offset = dataOffset++; + WriteBatchFields(key, 0, ops.Length); + dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr); + } + else + { + dataBuffer[dataOffset++] = (byte)(readAttr | (record.readAllBins ? Command.INFO1_GET_ALL : Command.INFO1_NOBINDATA)); + WriteBatchFields(key, 0, 0); + } + prev = record; + } + } + + // Write real field size. + ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); + End(compress); + } + + public virtual void SetBatchRead + ( + BatchPolicy policy, + Key[] keys, + BatchNode batch, + string[] binNames, + Operation[] ops, + int readAttr + ) + { + // Estimate full row size + int[] offsets = batch.offsets; + int max = batch.offsetsSize; + + // Estimate dataBuffer size. + Begin(); + int fieldCount = 1; + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + dataOffset += FIELD_HEADER_SIZE + 5; + + Key prev = null; + + for (int i = 0; i < max; i++) + { + Key key = keys[offsets[i]]; + + dataOffset += key.digest.Length + 4; + + // Try reference equality in hope that namespace for all keys is set from a fixed variable. + if (prev != null && prev.ns == key.ns && prev.setName == key.setName) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Estimate full header, namespace and bin names. 
+ dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE + 6; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + + if (binNames != null) + { + foreach (String binName in binNames) + { + EstimateOperationSize(binName); + } + } + else if (ops != null) + { + foreach (Operation op in ops) + { + EstimateReadOperationSize(op); + } + } + prev = key; + } + } + + bool compress = SizeBuffer(policy); + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + WriteHeaderRead(policy, totalTimeout, readAttr | Command.INFO1_BATCH, 0, 0, fieldCount, 0); + + policy.filterExp?.Write(this); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = (policy.allowInline) ? (byte)1 : (byte)0; + prev = null; + + for (int i = 0; i < max; i++) + { + int index = offsets[i]; + ByteUtil.IntToBytes((uint)index, dataBuffer, dataOffset); + dataOffset += 4; + + Key key = keys[index]; + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + // Try reference equality in hope that namespace for all keys is set from a fixed variable. + if (prev != null && prev.ns == key.ns && prev.setName == key.setName) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full header, namespace and bin names. 
+ dataBuffer[dataOffset++] = BATCH_MSG_READ; + + if (binNames != null && binNames.Length != 0) + { + dataBuffer[dataOffset++] = (byte)readAttr; + WriteBatchFields(key, 0, binNames.Length); + + foreach (String binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + else if (ops != null) + { + int offset = dataOffset++; + WriteBatchFields(key, 0, ops.Length); + dataBuffer[offset] = (byte)WriteReadOnlyOperations(ops, readAttr); + } + else + { + dataBuffer[dataOffset++] = (byte)readAttr; + WriteBatchFields(key, 0, 0); + } + prev = key; + } + } + + // Write real field size. + ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); + End(compress); + } + + //-------------------------------------------------- + // Batch Read/Write Operations + //-------------------------------------------------- + + public virtual void SetBatchOperate( + BatchPolicy policy, + IList records, + BatchNode batch) + { + BatchOffsetsNative offsets = new(batch); + SetBatchOperate(policy, records, offsets); + } + + public void SetBatchOperate( + BatchPolicy policy, + IList records, + BatchOffsets offsets) + { + Begin(); + int max = offsets.Size(); + Txn txn = policy.Txn; + long?[] versions = null; + + if (txn != null) + { + versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + BatchRecord record = (BatchRecord)records[offset]; + versions[i] = txn.GetReadVersion(record.key); + } + } + + int fieldCount = 1; + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + dataOffset += FIELD_HEADER_SIZE + 5; + + BatchRecord prev = null; + long? verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + BatchRecord record = (BatchRecord)records[offset]; + Key key = record.key; + long? 
ver = versions?[i]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(policy, key, record, prev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Estimate full header, namespace and bin names. + dataOffset += 12; + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTxnBatch(txn, ver, record.hasWrite); + dataOffset += record.Size(policy); + prev = record; + verPrev = ver; + } + } + bool compress = SizeBuffer(policy); + + WriteBatchHeader(policy, totalTimeout, fieldCount); + + policy.filterExp?.Write(this); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + + BatchAttr attr = new(); + prev = null; + verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + BatchRecord record = (BatchRecord)records[offset]; + long? ver = versions?[i]; + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; + + Key key = record.key; + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(policy, key, record, prev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. 
+ switch (record.GetBatchType()) + { + case BatchRecord.Type.BATCH_READ: + { + BatchRead br = (BatchRead)record; + + if (br.policy != null) + { + attr.SetRead(br.policy); + } + else + { + attr.SetRead(policy); + } + + if (br.binNames != null) + { + if (br.binNames.Length > 0) + { + WriteBatchBinNames(key, txn, ver, br.binNames, attr, attr.filterExp); + } + else + { + attr.AdjustRead(true); + WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0); + } + } + else if (br.ops != null) + { + attr.AdjustRead(br.ops); + WriteBatchOperations(key, txn, ver, br.ops, attr, attr.filterExp); + } + else + { + attr.AdjustRead(br.readAllBins); + WriteBatchRead(key, txn, ver, attr, attr.filterExp, 0); + } + break; + } + + case BatchRecord.Type.BATCH_WRITE: + { + BatchWrite bw = (BatchWrite)record; + + if (bw.policy != null) + { + attr.SetWrite(bw.policy); + } + else + { + attr.SetWrite(policy); + } + attr.AdjustWrite(bw.ops); + WriteBatchOperations(key, txn, ver, bw.ops, attr, attr.filterExp); + break; + } + + case BatchRecord.Type.BATCH_UDF: + { + BatchUDF bu = (BatchUDF)record; + + if (bu.policy != null) + { + attr.SetUDF(bu.policy); + } + else + { + attr.SetUDF(policy); + } + WriteBatchWrite(key, txn, ver, attr, attr.filterExp, 3, 0); + WriteField(bu.packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(bu.functionName, FieldType.UDF_FUNCTION); + WriteField(bu.argBytes, FieldType.UDF_ARGLIST); + break; + } + + case BatchRecord.Type.BATCH_DELETE: + { + BatchDelete bd = (BatchDelete)record; + + if (bd.policy != null) + { + attr.SetDelete(bd.policy); + } + else + { + attr.SetDelete(policy); + } + WriteBatchWrite(key, txn, ver, attr, attr.filterExp, 0, 0); + break; + } + } + prev = record; + verPrev = ver; + } + } + + // Write real field size. 
+ ByteUtil.IntToBytes((uint)(dataOffset - MSG_TOTAL_HEADER_SIZE - 4), dataBuffer, fieldSizeOffset); + End(compress); + } + + public virtual void SetBatchOperate + ( + BatchPolicy policy, + Key[] keys, + BatchNode batch, + string[] binNames, + Operation[] ops, + BatchAttr attr + ) + { + BatchOffsetsNative offsets = new(batch); + SetBatchOperate(policy, keys, binNames, ops, attr, offsets); + } + + public void SetBatchOperate( + BatchPolicy policy, + Key[] keys, + string[] binNames, + Operation[] ops, + BatchAttr attr, + BatchOffsets offsets + ) + { + // Estimate full row size + int max = offsets.Size(); + Txn txn = policy.Txn; + long?[] versions = null; + + Begin(); + + if (txn != null) + { + versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + versions[i] = txn.GetReadVersion(key); + } + } + + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = 1; + + if (exp != null) + { + dataOffset += exp.Size(); + fieldCount++; + } + + dataOffset += FIELD_HEADER_SIZE + 5; + + Key keyPrev = null; + long? verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Write full header and namespace/set/bin names. 
+ dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTxnBatch(txn, ver, attr.hasWrite); + + if (attr.sendKey) + { + dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; + } + + if (binNames != null) + { + foreach (string binName in binNames) + { + EstimateOperationSize(binName); + } + } + else if (ops != null) + { + foreach (Operation op in ops) + { + if (Operation.IsWrite(op.type)) + { + if (!attr.hasWrite) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read"); + } + dataOffset += 2; // Extra write specific fields. + } + EstimateOperationSize(op); + } + } + else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) + { + dataOffset += 2; // Extra write specific fields. + } + keyPrev = key; + verPrev = ver; + } + } + + bool compress = SizeBuffer(policy); + + WriteBatchHeader(policy, totalTimeout, fieldCount); + + exp?.Write(this); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; + + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; + + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. 
+ if (binNames != null) + { + WriteBatchBinNames(key, txn, ver, binNames, attr, null); + } + else if (ops != null) + { + WriteBatchOperations(key, txn, ver, ops, attr, null); + } + else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) + { + WriteBatchWrite(key, txn, ver, attr, null, 0, 0); + } + else + { + WriteBatchRead(key, txn, ver, attr, null, 0); + } + keyPrev = key; + verPrev = ver; + } + } + + // Write real field size. + ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(compress); + } + + public virtual void SetBatchUDF( + BatchPolicy policy, + Key[] keys, + BatchNode batch, + string packageName, + string functionName, + byte[] argBytes, + BatchAttr attr + ) + { + BatchOffsetsNative offsets = new(batch); + SetBatchUDF(policy, keys, packageName, functionName, argBytes, attr, offsets); + } + + public virtual void SetBatchUDF + ( + BatchPolicy policy, + Key[] keys, + string packageName, + string functionName, + byte[] argBytes, + BatchAttr attr, + BatchOffsets offsets + ) + { + // Estimate buffer size. + Begin(); + int max = offsets.Size(); + Txn txn = policy.Txn; + long?[] versions = null; + + if (txn != null) + { + versions = new long?[max]; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + versions[i] = txn.GetReadVersion(key); + } + } + + Expression exp = GetBatchExpression(policy, attr); + int fieldCount = 1; + + if (exp != null) + { + dataOffset += exp.Size(); + fieldCount++; + } + + dataOffset += FIELD_HEADER_SIZE + 5; + + Key keyPrev = null; + long? verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; + + dataOffset += key.digest.Length + 4; + + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else + { + // Write full header and namespace/set/bin names. 
+ dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + SizeTxnBatch(txn, ver, attr.hasWrite); + + if (attr.sendKey) + { + dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; + } + dataOffset += 2; // gen(2) = 2 + EstimateUdfSize(packageName, functionName, argBytes); + keyPrev = key; + verPrev = ver; + } + } + + bool compress = SizeBuffer(policy); + + WriteBatchHeader(policy, totalTimeout, fieldCount); + + exp?.Write(this); + + int fieldSizeOffset = dataOffset; + WriteFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + ByteUtil.IntToBytes((uint)max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = GetBatchFlags(policy); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) + { + int offset = offsets.Get(i); + Key key = keys[offset]; + long? ver = versions?[i]; + + ByteUtil.IntToBytes((uint)offset, dataBuffer, dataOffset); + dataOffset += 4; + + byte[] digest = key.digest; + Array.Copy(digest, 0, dataBuffer, dataOffset, digest.Length); + dataOffset += digest.Length; + + if (CanRepeat(attr, key, keyPrev, ver, verPrev)) + { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else + { + // Write full message. + WriteBatchWrite(key, txn, ver, attr, null, 3, 0); + WriteField(packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(functionName, FieldType.UDF_FUNCTION); + WriteField(argBytes, FieldType.UDF_ARGLIST); + keyPrev = key; + verPrev = ver; + } + } + + // Write real field size. + ByteUtil.IntToBytes((uint)dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + End(compress); + } + + private static bool CanRepeat( + Policy policy, + Key key, + BatchRecord record, + BatchRecord prev, + long? ver, + long? 
verPrev + ) + { + // Avoid relatively expensive full equality checks for performance reasons. + // Use reference equality only in hope that common namespaces/bin names are set from + // fixed variables. It's fine if equality not determined correctly because it just + // results in more space used. The batch will still be correct. + // Same goes for ver reference equality check. + return !policy.sendKey && verPrev == ver && prev != null && prev.key.ns == key.ns && + prev.key.setName == key.setName && record.Equals(prev); + } + + private static bool CanRepeat(BatchAttr attr, Key key, Key keyPrev, long? ver, long? verPrev) + { + return !attr.sendKey && verPrev == ver && keyPrev != null && keyPrev.ns == key.ns && + keyPrev.setName == key.setName; + } + + private static bool CanRepeat(Key key, Key keyPrev, long? ver, long? verPrev) + { + return verPrev == ver && keyPrev != null && keyPrev.ns == key.ns && + keyPrev.setName == key.setName; + } + + private static Expression GetBatchExpression(Policy policy, BatchAttr attr) + { + return attr.filterExp ?? policy.filterExp; + } + + private static byte GetBatchFlags(BatchPolicy policy) + { + byte flags = 0x8; + + if (policy.allowInline) + { + flags |= 0x1; + } + + if (policy.allowInlineSSD) + { + flags |= 0x2; + } + + if (policy.respondAllKeys) + { + flags |= 0x4; + } + return flags; + } + + private void SizeTxnBatch(Txn txn, long? ver, bool hasWrite) + { + if (txn != null) + { + dataOffset++; // Add info4 byte for MRT. + dataOffset += 8 + FIELD_HEADER_SIZE; + + if (ver.HasValue) + { + dataOffset += 7 + FIELD_HEADER_SIZE; + } + + if (hasWrite && txn.Deadline != 0) + { + dataOffset += 4 + FIELD_HEADER_SIZE; + } + } + } + + private void WriteBatchHeader(Policy policy, int timeout, int fieldCount) + { + int readAttr = Command.INFO1_BATCH; + + if (policy.compress) + { + readAttr |= Command.INFO1_COMPRESS_RESPONSE; + } + + // Write all header data except total size which must be written last. 
+ dataOffset += 8; + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)0; + + Array.Clear(dataBuffer, dataOffset, 10); + dataOffset += 10; + + dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes(0, dataBuffer, dataOffset); + } + + private void WriteBatchBinNames(Key key, Txn txn, long? ver, string[] binNames, BatchAttr attr, Expression filter) + { + WriteBatchRead(key, txn, ver, attr, filter, binNames.Length); + + foreach (string binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + + private void WriteBatchOperations(Key key, Txn txn, long? ver, Operation[] ops, BatchAttr attr, Expression filter) + { + if (attr.hasWrite) + { + WriteBatchWrite(key, txn, ver, attr, filter, 0, ops.Length); + } + else + { + WriteBatchRead(key, txn, ver, attr, filter, ops.Length); + } + + foreach (Operation op in ops) + { + WriteOperation(op); + } + } + + private void WriteBatchRead(Key key, Txn txn, long? 
ver, BatchAttr attr, Expression filter, int opCount) + { + if (txn != null) + { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = (byte)attr.txnAttr; + ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + WriteBatchFieldsTxn(key, txn, ver, attr, filter, 0, opCount); + } + else + { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + WriteBatchFieldsReg(key, attr, filter, 0, opCount); + } + } + + private void WriteBatchWrite(Key key, Txn txn, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) + { + if (txn != null) + { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = (byte)attr.txnAttr; + ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); + dataOffset += 2; + ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + WriteBatchFieldsTxn(key, txn, ver, attr, filter, fieldCount, opCount); + } + else + { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + ByteUtil.ShortToBytes((ushort)attr.generation, dataBuffer, dataOffset); + dataOffset += 2; + 
ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + WriteBatchFieldsReg(key, attr, filter, fieldCount, opCount); + } + } + + private void WriteBatchFieldsTxn(Key key, Txn txn, long? ver, BatchAttr attr, Expression filter, int fieldCount, int opCount) + { + fieldCount++; + + if (ver.HasValue) + { + fieldCount++; + } + + if (attr.hasWrite && txn.Deadline != 0) + { + fieldCount++; + } + + if (filter != null) + { + fieldCount++; + } + + if (attr.sendKey) + { + fieldCount++; + } + + WriteBatchFields(key, fieldCount, opCount); + + WriteFieldLE(txn.Id, FieldType.MRT_ID); + + if (ver.HasValue) + { + WriteFieldVersion(ver.Value); + } + + if (attr.hasWrite && txn.Deadline != 0) + { + WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE); + } + + filter?.Write(this); + + if (attr.sendKey) + { + WriteField(key.userKey, FieldType.KEY); + } + } + + private void WriteBatchFieldsReg( + Key key, + BatchAttr attr, + Expression filter, + int fieldCount, + int opCount + ) + { + if (filter != null) + { + fieldCount++; + } + + if (attr.sendKey) + { + fieldCount++; + } + + WriteBatchFields(key, fieldCount, opCount); + + filter?.Write(this); + + if (attr.sendKey) + { + WriteField(key.userKey, FieldType.KEY); + } + } + + private void WriteBatchFields(Key key, int fieldCount, int opCount) + { + fieldCount += 2; + ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += 2; + ByteUtil.ShortToBytes((ushort)opCount, dataBuffer, dataOffset); + dataOffset += 2; + WriteField(key.ns, FieldType.NAMESPACE); + WriteField(key.setName, FieldType.TABLE); + } + + //-------------------------------------------------- + // Scan + //-------------------------------------------------- + + public virtual void SetScan + ( + Cluster cluster, + ScanPolicy policy, + string ns, + string setName, + string[] binNames, + ulong taskId, + NodePartitions nodePartitions + ) + { + Begin(); + int fieldCount = 0; + int partsFullSize = nodePartitions.partsFull.Count
* 2; + int partsPartialSize = nodePartitions.partsPartial.Count * 20; + long maxRecords = nodePartitions.recordMax; + + if (ns != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(ns) + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (setName != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(setName) + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (partsFullSize > 0) + { + dataOffset += partsFullSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (partsPartialSize > 0) + { + dataOffset += partsPartialSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (maxRecords > 0) + { + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (policy.recordsPerSecond > 0) + { + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + // Estimate scan timeout size. + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + + // Estimate taskId size. + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + + if (binNames != null) + { + foreach (string binName in binNames) + { + EstimateOperationSize(binName); + } + } + + SizeBuffer(); + int readAttr = Command.INFO1_READ; + + if (!policy.includeBinData) + { + readAttr |= Command.INFO1_NOBINDATA; + } + + // Clusters that support partition queries also support not sending partition done messages. + int operationCount = (binNames == null) ? 
0 : binNames.Length; + WriteHeaderRead(policy, totalTimeout, readAttr, 0, Command.INFO3_PARTITION_DONE, fieldCount, operationCount); + + if (ns != null) + { + WriteField(ns, FieldType.NAMESPACE); + } + + if (setName != null) + { + WriteField(setName, FieldType.TABLE); + } + + if (partsFullSize > 0) + { + WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsFull) + { + ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); + dataOffset += 2; + } + } + + if (partsPartialSize > 0) + { + WriteFieldHeader(partsPartialSize, FieldType.DIGEST_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsPartial) + { + Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20); + dataOffset += 20; + } + } + + if (maxRecords > 0) + { + WriteField((ulong)maxRecords, FieldType.MAX_RECORDS); + } + + if (policy.recordsPerSecond > 0) + { + WriteField(policy.recordsPerSecond, FieldType.RECORDS_PER_SECOND); + } + + policy.filterExp?.Write(this); + + // Write scan timeout + WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); + + // Write taskId field + WriteField(taskId, FieldType.QUERY_ID); + + if (binNames != null) + { + foreach (string binName in binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + End(); + } + + //-------------------------------------------------- + // Query + //-------------------------------------------------- + + protected virtual internal void SetQuery + ( + Cluster cluster, + Policy policy, + Statement statement, + ulong taskId, + bool background, + NodePartitions nodePartitions + ) + { + byte[] functionArgBuffer = null; + int fieldCount = 0; + int filterSize = 0; + int binNameSize = 0; + bool isNew = cluster.hasPartitionQuery; + + Begin(); + + if (statement.ns != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(statement.ns) + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (statement.setName != null) + { + dataOffset += 
ByteUtil.EstimateSizeUtf8(statement.setName) + FIELD_HEADER_SIZE; + fieldCount++; + } + + // Estimate recordsPerSecond field size. This field is used in new servers and not used + // (but harmless to add) in old servers. + if (statement.recordsPerSecond > 0) + { + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + } + + // Estimate socket timeout field size. This field is used in new servers and not used + // (but harmless to add) in old servers. + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + + // Estimate taskId field. + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + + byte[] packedCtx = null; + + if (statement.filter != null) + { + IndexCollectionType type = statement.filter.CollectionType; + + // Estimate INDEX_TYPE field. + if (type != IndexCollectionType.DEFAULT) + { + dataOffset += FIELD_HEADER_SIZE + 1; + fieldCount++; + } + + // Estimate INDEX_RANGE field. + dataOffset += FIELD_HEADER_SIZE; + filterSize++; // num filters + filterSize += statement.filter.EstimateSize(); + dataOffset += filterSize; + fieldCount++; + + if (!isNew) + { + // Query bin names are specified as a field (Scan bin names are specified later as operations) + // in old servers. Estimate size for selected bin names. + if (statement.binNames != null && statement.binNames.Length > 0) + { + dataOffset += FIELD_HEADER_SIZE; + binNameSize++; // num bin names + + foreach (string binName in statement.binNames) + { + binNameSize += ByteUtil.EstimateSizeUtf8(binName) + 1; + } + dataOffset += binNameSize; + fieldCount++; + } + } + + packedCtx = statement.filter.PackedCtx; + + if (packedCtx != null) + { + dataOffset += FIELD_HEADER_SIZE + packedCtx.Length; + fieldCount++; + } + } + + // Estimate aggregation/background function size. 
+ if (statement.functionName != null) + { + dataOffset += FIELD_HEADER_SIZE + 1; // udf type + dataOffset += ByteUtil.EstimateSizeUtf8(statement.packageName) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(statement.functionName) + FIELD_HEADER_SIZE; + + if (statement.functionArgs.Length > 0) + { + functionArgBuffer = Packer.Pack(statement.functionArgs); + } + else + { + functionArgBuffer = Array.Empty<byte>(); + } + dataOffset += FIELD_HEADER_SIZE + functionArgBuffer.Length; + fieldCount += 4; + } + + if (policy.filterExp != null) + { + dataOffset += policy.filterExp.Size(); + fieldCount++; + } + + long maxRecords = 0; + int partsFullSize = 0; + int partsPartialDigestSize = 0; + int partsPartialBValSize = 0; + + if (nodePartitions != null) + { + partsFullSize = nodePartitions.partsFull.Count * 2; + partsPartialDigestSize = nodePartitions.partsPartial.Count * 20; + + if (statement.filter != null) + { + partsPartialBValSize = nodePartitions.partsPartial.Count * 8; + } + maxRecords = nodePartitions.recordMax; + } + + if (partsFullSize > 0) + { + dataOffset += partsFullSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (partsPartialDigestSize > 0) + { + dataOffset += partsPartialDigestSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (partsPartialBValSize > 0) + { + dataOffset += partsPartialBValSize + FIELD_HEADER_SIZE; + fieldCount++; + } + + // Estimate max records field size. This field is used in new servers and not used + // (but harmless to add) in old servers. + if (maxRecords > 0) + { + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + } + + // Operations (used in query execute) and bin names (used in scan/query) are mutually exclusive. + int operationCount = 0; + + if (statement.operations != null) + { + // Estimate size for background operations.
+ if (!background) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Operations not allowed in foreground query"); + } + + foreach (Operation operation in statement.operations) + { + if (!Operation.IsWrite(operation.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Read operations not allowed in background query"); + } + EstimateOperationSize(operation); + } + operationCount = statement.operations.Length; + } + else if (statement.binNames != null && (isNew || statement.filter == null)) + { + // Estimate size for selected bin names (query bin names already handled for old servers). + foreach (string binName in statement.binNames) + { + EstimateOperationSize(binName); + } + operationCount = statement.binNames.Length; + } + + SizeBuffer(); + + if (background) + { + WriteHeaderWrite((WritePolicy)policy, Command.INFO2_WRITE, fieldCount, operationCount); + } + else + { + QueryPolicy qp = (QueryPolicy)policy; + int readAttr = Command.INFO1_READ; + int writeAttr = 0; + + if (!qp.includeBinData) + { + readAttr |= Command.INFO1_NOBINDATA; + } + + if (qp.shortQuery || qp.expectedDuration == QueryDuration.SHORT) + { + readAttr |= Command.INFO1_SHORT_QUERY; + } + else if (qp.expectedDuration == QueryDuration.LONG_RELAX_AP) + { + writeAttr |= Command.INFO2_RELAX_AP_LONG_QUERY; + } + + int infoAttr = (isNew || statement.filter == null) ? Command.INFO3_PARTITION_DONE : 0; + + WriteHeaderRead(policy, totalTimeout, readAttr, writeAttr, infoAttr, fieldCount, operationCount); + } + + if (statement.ns != null) + { + WriteField(statement.ns, FieldType.NAMESPACE); + } + + if (statement.setName != null) + { + WriteField(statement.setName, FieldType.TABLE); + } + + // Write records per second. + if (statement.recordsPerSecond > 0) + { + WriteField(statement.recordsPerSecond, FieldType.RECORDS_PER_SECOND); + } + + // Write socket idle timeout. 
+ WriteField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); + + // Write taskId field + WriteField(taskId, FieldType.QUERY_ID); + + if (statement.filter != null) + { + IndexCollectionType type = statement.filter.CollectionType; + + if (type != IndexCollectionType.DEFAULT) + { + WriteFieldHeader(1, FieldType.INDEX_TYPE); + dataBuffer[dataOffset++] = (byte)type; + } + + WriteFieldHeader(filterSize, FieldType.INDEX_RANGE); + dataBuffer[dataOffset++] = (byte)1; + dataOffset = statement.filter.Write(dataBuffer, dataOffset); + + if (!isNew) + { + // Query bin names are specified as a field (Scan bin names are specified later as operations) + // in old servers. + if (statement.binNames != null && statement.binNames.Length > 0) + { + WriteFieldHeader(binNameSize, FieldType.QUERY_BINLIST); + dataBuffer[dataOffset++] = (byte)statement.binNames.Length; + + foreach (string binName in statement.binNames) + { + int len = ByteUtil.StringToUtf8(binName, dataBuffer, dataOffset + 1); + dataBuffer[dataOffset] = (byte)len; + dataOffset += len + 1; + } + } + } + + if (packedCtx != null) + { + WriteFieldHeader(packedCtx.Length, FieldType.INDEX_CONTEXT); + Array.Copy(packedCtx, 0, dataBuffer, dataOffset, packedCtx.Length); + dataOffset += packedCtx.Length; + } + } + + if (statement.functionName != null) + { + WriteFieldHeader(1, FieldType.UDF_OP); + dataBuffer[dataOffset++] = background ? 
(byte)2 : (byte)1; + WriteField(statement.packageName, FieldType.UDF_PACKAGE_NAME); + WriteField(statement.functionName, FieldType.UDF_FUNCTION); + WriteField(functionArgBuffer, FieldType.UDF_ARGLIST); + } + + policy.filterExp?.Write(this); + + if (partsFullSize > 0) + { + WriteFieldHeader(partsFullSize, FieldType.PID_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsFull) + { + ByteUtil.ShortToLittleBytes((ushort)part.id, dataBuffer, dataOffset); + dataOffset += 2; + } + } + + if (partsPartialDigestSize > 0) + { + WriteFieldHeader(partsPartialDigestSize, FieldType.DIGEST_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsPartial) + { + Array.Copy(part.digest, 0, dataBuffer, dataOffset, 20); + dataOffset += 20; + } + } + + if (partsPartialBValSize > 0) + { + WriteFieldHeader(partsPartialBValSize, FieldType.BVAL_ARRAY); + + foreach (PartitionStatus part in nodePartitions.partsPartial) + { + ByteUtil.LongToLittleBytes(part.bval, dataBuffer, dataOffset); + dataOffset += 8; + } + } + + if (maxRecords > 0) + { + WriteField((ulong)maxRecords, FieldType.MAX_RECORDS); + } + + if (statement.operations != null) + { + foreach (Operation operation in statement.operations) + { + WriteOperation(operation); + } + } + else if (statement.binNames != null && (isNew || statement.filter == null)) + { + foreach (string binName in statement.binNames) + { + WriteOperation(binName, Operation.Type.READ); + } + } + End(); + } + + //-------------------------------------------------- + // Command Sizing + //-------------------------------------------------- + + private int EstimateKeyAttrSize(Policy policy, Key key, BatchAttr attr, Expression filterExp) + { + int fieldCount = EstimateKeySize(policy, key, attr.hasWrite); + + if (filterExp != null) + { + dataOffset += filterExp.Size(); + fieldCount++; + } + return fieldCount; + } + + private int EstimateKeySize(Policy policy, Key key, bool hasWrite) + { + int fieldCount = EstimateKeySize(key); + + fieldCount += 
SizeTxn(key, policy.Txn, hasWrite); + + if (policy.sendKey) + { + dataOffset += key.userKey.EstimateSize() + FIELD_HEADER_SIZE + 1; + fieldCount++; + } + return fieldCount; + } + + private int EstimateKeySize(Key key) + { + int fieldCount = 0; + + if (key.ns != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(key.ns) + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (key.setName != null) + { + dataOffset += ByteUtil.EstimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + fieldCount++; + } + + dataOffset += key.digest.Length + FIELD_HEADER_SIZE; + fieldCount++; + + return fieldCount; + } + + private int EstimateUdfSize(string packageName, string functionName, byte[] bytes) + { + dataOffset += ByteUtil.EstimateSizeUtf8(packageName) + FIELD_HEADER_SIZE; + dataOffset += ByteUtil.EstimateSizeUtf8(functionName) + FIELD_HEADER_SIZE; + dataOffset += bytes.Length + FIELD_HEADER_SIZE; + return 3; + } + + private void EstimateOperationSize(Bin bin) + { + dataOffset += ByteUtil.EstimateSizeUtf8(bin.name) + OPERATION_HEADER_SIZE; + dataOffset += bin.value.EstimateSize(); + } + + private void EstimateOperationSize(Operation operation) + { + dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE; + dataOffset += operation.value.EstimateSize(); + } + + private void EstimateReadOperationSize(Operation operation) + { + if (Operation.IsWrite(operation.type)) + { + throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Write operations not allowed in batch read"); + } + dataOffset += ByteUtil.EstimateSizeUtf8(operation.binName) + OPERATION_HEADER_SIZE; + dataOffset += operation.value.EstimateSize(); + } + + private void EstimateOperationSize(string binName) + { + dataOffset += ByteUtil.EstimateSizeUtf8(binName) + OPERATION_HEADER_SIZE; + } + + private void EstimateOperationSize() + { + dataOffset += OPERATION_HEADER_SIZE; + } + + //-------------------------------------------------- + // Command Writes + //-------------------------------------------------- 
+ + /// + /// Header write for write commands. + /// + private void WriteHeaderWrite(WritePolicy policy, int writeAttr, int fieldCount, int operationCount) + { + // Set flags. + int generation = 0; + int infoAttr = 0; + + switch (policy.recordExistsAction) + { + case RecordExistsAction.UPDATE: + break; + case RecordExistsAction.UPDATE_ONLY: + infoAttr |= Command.INFO3_UPDATE_ONLY; + break; + case RecordExistsAction.REPLACE: + infoAttr |= Command.INFO3_CREATE_OR_REPLACE; + break; + case RecordExistsAction.REPLACE_ONLY: + infoAttr |= Command.INFO3_REPLACE_ONLY; + break; + case RecordExistsAction.CREATE_ONLY: + writeAttr |= Command.INFO2_CREATE_ONLY; + break; + } + + switch (policy.generationPolicy) + { + case GenerationPolicy.NONE: + break; + case GenerationPolicy.EXPECT_GEN_EQUAL: + generation = policy.generation; + writeAttr |= Command.INFO2_GENERATION; + break; + case GenerationPolicy.EXPECT_GEN_GT: + generation = policy.generation; + writeAttr |= Command.INFO2_GENERATION_GT; + break; + } + + if (policy.commitLevel == CommitLevel.COMMIT_MASTER) + { + infoAttr |= Command.INFO3_COMMIT_MASTER; + } + + if (policy.durableDelete) + { + writeAttr |= Command.INFO2_DURABLE_DELETE; + } + + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
+ dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)writeAttr; + dataBuffer[dataOffset++] = (byte)infoAttr; + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + } + + /// + /// Header write for operate command. + /// + private void WriteHeaderReadWrite + ( + WritePolicy policy, + OperateArgs args, + int fieldCount + ) + { + // Set flags. + int generation = 0; + int ttl = args.hasWrite ? policy.expiration : policy.readTouchTtlPercent; + int readAttr = args.readAttr; + int writeAttr = args.writeAttr; + int infoAttr = 0; + int operationCount = args.operations.Length; + + switch (policy.recordExistsAction) + { + case RecordExistsAction.UPDATE: + break; + case RecordExistsAction.UPDATE_ONLY: + infoAttr |= Command.INFO3_UPDATE_ONLY; + break; + case RecordExistsAction.REPLACE: + infoAttr |= Command.INFO3_CREATE_OR_REPLACE; + break; + case RecordExistsAction.REPLACE_ONLY: + infoAttr |= Command.INFO3_REPLACE_ONLY; + break; + case RecordExistsAction.CREATE_ONLY: + writeAttr |= Command.INFO2_CREATE_ONLY; + break; + } + + switch (policy.generationPolicy) + { + case GenerationPolicy.NONE: + break; + case GenerationPolicy.EXPECT_GEN_EQUAL: + generation = policy.generation; + writeAttr |= Command.INFO2_GENERATION; + break; + case GenerationPolicy.EXPECT_GEN_GT: + generation = policy.generation; + writeAttr |= Command.INFO2_GENERATION_GT; + break; + } + + if (policy.commitLevel == CommitLevel.COMMIT_MASTER) + { + infoAttr |= Command.INFO3_COMMIT_MASTER; + } + + if (policy.durableDelete) + { + writeAttr |= 
Command.INFO2_DURABLE_DELETE; + } + switch (policy.readModeSC) + { + case ReadModeSC.SESSION: + break; + case ReadModeSC.LINEARIZE: + infoAttr |= Command.INFO3_SC_READ_TYPE; + break; + case ReadModeSC.ALLOW_REPLICA: + infoAttr |= Command.INFO3_SC_READ_RELAX; + break; + case ReadModeSC.ALLOW_UNAVAILABLE: + infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; + break; + } + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + if (policy.compress) + { + readAttr |= Command.INFO1_COMPRESS_RESPONSE; + } + + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)writeAttr; + dataBuffer[dataOffset++] = (byte)infoAttr; + dataBuffer[dataOffset++] = 0; // unused + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes((uint)generation, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)ttl, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + } + + /// + /// Header write for read commands. 
+ /// + private void WriteHeaderRead + ( + Policy policy, + int timeout, + int readAttr, + int writeAttr, + int infoAttr, + int fieldCount, + int operationCount + ) + { + switch (policy.readModeSC) + { + case ReadModeSC.SESSION: + break; + case ReadModeSC.LINEARIZE: + infoAttr |= Command.INFO3_SC_READ_TYPE; + break; + case ReadModeSC.ALLOW_REPLICA: + infoAttr |= Command.INFO3_SC_READ_RELAX; + break; + case ReadModeSC.ALLOW_UNAVAILABLE: + infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; + break; + } + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + if (policy.compress) + { + readAttr |= Command.INFO1_COMPRESS_RESPONSE; + } + + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)writeAttr; + dataBuffer[dataOffset++] = (byte)infoAttr; + + for (int i = 0; i < 6; i++) + { + dataBuffer[dataOffset++] = 0; + } + dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)timeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + } + + /// + /// Header write for read header commands. 
+ /// + private void WriteHeaderReadHeader(Policy policy, int readAttr, int fieldCount, int operationCount) + { + int infoAttr = 0; + + switch (policy.readModeSC) + { + case ReadModeSC.SESSION: + break; + case ReadModeSC.LINEARIZE: + infoAttr |= Command.INFO3_SC_READ_TYPE; + break; + case ReadModeSC.ALLOW_REPLICA: + infoAttr |= Command.INFO3_SC_READ_RELAX; + break; + case ReadModeSC.ALLOW_UNAVAILABLE: + infoAttr |= Command.INFO3_SC_READ_TYPE | Command.INFO3_SC_READ_RELAX; + break; + } + + if (policy.readModeAP == ReadModeAP.ALL) + { + readAttr |= Command.INFO1_READ_MODE_AP_ALL; + } + + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. + dataBuffer[dataOffset++] = (byte)readAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)infoAttr; + + for (int i = 0; i < 6; i++) + { + dataBuffer[dataOffset++] = 0; + } + dataOffset += ByteUtil.IntToBytes((uint)policy.readTouchTtlPercent, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + } + + /// + /// Header write for batch single commands. + /// + private void WriteKeyAttr( + Policy policy, + Key key, + BatchAttr attr, + Expression filterExp, + int fieldCount, + int operationCount + ) + { + dataOffset += 8; + + // Write all header data except total size which must be written last. + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
+ dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = 0; // unused + dataBuffer[dataOffset++] = 0; // clear the result code + dataOffset += ByteUtil.IntToBytes((uint)attr.generation, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)attr.expiration, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)operationCount, dataBuffer, dataOffset); + + WriteKey(policy, key, attr.hasWrite); + + filterExp?.Write(this); + } + + private void WriteKey(Policy policy, Key key, bool sendDeadline) + { + WriteKey(key); + WriteTxn(policy.Txn, sendDeadline); + + if (policy.sendKey) + { + WriteField(key.userKey, FieldType.KEY); + } + } + + private void WriteKey(Key key) + { + // Write key into dataBuffer. + if (key.ns != null) + { + WriteField(key.ns, FieldType.NAMESPACE); + } + + if (key.setName != null) + { + WriteField(key.setName, FieldType.TABLE); + } + + WriteField(key.digest, FieldType.DIGEST_RIPE); + } + + private int WriteReadOnlyOperations(Operation[] ops, int readAttr) + { + bool readBin = false; + bool readHeader = false; + + foreach (Operation op in ops) + { + switch (op.type) + { + case Operation.Type.READ: + // Read all bins if no bin is specified. 
+ if (op.binName == null) + { + readAttr |= Command.INFO1_GET_ALL; + } + readBin = true; + break; + + case Operation.Type.READ_HEADER: + readHeader = true; + break; + + default: + break; + } + WriteOperation(op); + } + + if (readHeader && !readBin) + { + readAttr |= Command.INFO1_NOBINDATA; + } + return readAttr; + } + + private void WriteOperation(Bin bin, Operation.Type operationType) + { + int nameLength = ByteUtil.StringToUtf8(bin.name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); + int valueLength = bin.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); + + ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); + dataBuffer[dataOffset++] = (byte)bin.value.Type; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)nameLength; + dataOffset += nameLength + valueLength; + } + + private void WriteOperation(Operation operation) + { + int nameLength = ByteUtil.StringToUtf8(operation.binName, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); + int valueLength = operation.value.Write(dataBuffer, dataOffset + OPERATION_HEADER_SIZE + nameLength); + + ByteUtil.IntToBytes((uint)(nameLength + valueLength + 4), dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = Operation.GetProtocolType(operation.type); + dataBuffer[dataOffset++] = (byte)operation.value.Type; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)nameLength; + dataOffset += nameLength + valueLength; + } + + private void WriteOperation(string name, Operation.Type operationType) + { + int nameLength = ByteUtil.StringToUtf8(name, dataBuffer, dataOffset + OPERATION_HEADER_SIZE); + + ByteUtil.IntToBytes((uint)(nameLength + 4), dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = 
(byte)0; + dataBuffer[dataOffset++] = (byte)nameLength; + dataOffset += nameLength; + } + + private void WriteOperation(Operation.Type operationType) + { + ByteUtil.IntToBytes(4, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = Operation.GetProtocolType(operationType); + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; + } + + private int SizeTxn(Key key, Txn txn, bool hasWrite) + { + int fieldCount = 0; + + if (txn != null) + { + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + + Version = txn.GetReadVersion(key); + + if (Version.HasValue) + { + dataOffset += 7 + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (hasWrite && txn.Deadline != 0) + { + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + } + } + return fieldCount; + } + + private void WriteTxn(Txn txn, bool sendDeadline) + { + if (txn != null) + { + WriteFieldLE(txn.Id, FieldType.MRT_ID); + + if (Version.HasValue) + { + WriteFieldVersion(Version.Value); + } + + if (sendDeadline && txn.Deadline != 0) + { + WriteFieldLE(txn.Deadline, FieldType.MRT_DEADLINE); + } + } + } + + private void WriteFieldVersion(long ver) + { + WriteFieldHeader(7, FieldType.RECORD_VERSION); + ByteUtil.LongToVersionBytes(ver, dataBuffer, dataOffset); + dataOffset += 7; + } + + private void WriteField(Value value, int type) + { + int offset = dataOffset + FIELD_HEADER_SIZE; + dataBuffer[offset++] = (byte)value.Type; + int len = value.Write(dataBuffer, offset) + 1; + WriteFieldHeader(len, type); + dataOffset += len; + } + + private void WriteField(string str, int type) + { + int len = ByteUtil.StringToUtf8(str, dataBuffer, dataOffset + FIELD_HEADER_SIZE); + WriteFieldHeader(len, type); + dataOffset += len; + } + + private void WriteField(byte[] bytes, int type) + { + Array.Copy(bytes, 0, dataBuffer, dataOffset + FIELD_HEADER_SIZE, bytes.Length); + WriteFieldHeader(bytes.Length, type); + dataOffset += bytes.Length; + } + + private void WriteField(int val, int 
type) + { + WriteFieldHeader(4, type); + ByteUtil.IntToBytes((uint)val, dataBuffer, dataOffset); + dataOffset += 4; + } + + private void WriteFieldLE(int val, int type) + { + WriteFieldHeader(4, type); + ByteUtil.IntToLittleBytes((uint)val, dataBuffer, dataOffset); + dataOffset += 4; + } + + private void WriteField(ulong val, int type) + { + WriteFieldHeader(8, type); + ByteUtil.LongToBytes(val, dataBuffer, dataOffset); + dataOffset += 8; + } + + private void WriteFieldLE(long val, int type) + { + WriteFieldHeader(8, type); + ByteUtil.LongToLittleBytes((ulong)val, dataBuffer, dataOffset); + dataOffset += 8; + } + + private void WriteFieldHeader(int size, int type) + { + ByteUtil.IntToBytes((uint)size + 1, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = (byte)type; + } + + internal virtual void WriteExpHeader(int size) + { + WriteFieldHeader(size, FieldType.FILTER_EXP); + } + + private void Begin() + { + dataOffset = MSG_TOTAL_HEADER_SIZE; + } + + private bool SizeBuffer(Policy policy) + { + if (policy.compress && dataOffset > COMPRESS_THRESHOLD) + { + // Command will be compressed. First, write uncompressed command + // into separate dataBuffer. Save normal dataBuffer for compressed command. + // Normal dataBuffer in async mode is from dataBuffer pool that is used to + // minimize memory pinning during socket operations. + dataBuffer = new byte[dataOffset]; + dataOffset = 0; + return true; + } + else + { + // Command will be uncompressed. + SizeBuffer(); + return false; + } + } + + private void End(bool compress) + { + if (!compress) + { + End(); + return; + } + + // Write proto header. + ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48); + ByteUtil.LongToBytes(size, dataBuffer, 0); + + byte[] srcBuf = dataBuffer; + int srcSize = dataOffset; + + // Increase requested dataBuffer size in case compressed dataBuffer size is + // greater than the uncompressed dataBuffer size. 
+ dataOffset += 16 + 100; + + // This method finds dataBuffer of requested size, resets dataOffset to segment offset + // and returns dataBuffer max size; + int trgBufSize = SizeBuffer(); + + // Compress to target starting at new dataOffset plus new header. + int trgSize = ByteUtil.Compress(srcBuf, srcSize, dataBuffer, dataOffset + 16, trgBufSize - 16) + 16; + + ulong proto = ((ulong)trgSize - 8) | (CL_MSG_VERSION << 56) | (MSG_TYPE_COMPRESSED << 48); + ByteUtil.LongToBytes(proto, dataBuffer, dataOffset); + ByteUtil.LongToBytes((ulong)srcSize, dataBuffer, dataOffset + 8); + SetLength(trgSize); + } + + protected internal abstract int SizeBuffer(); + protected internal abstract void End(); + protected internal abstract void SetLength(int length); + + //-------------------------------------------------- + // Response Parsing + //-------------------------------------------------- + + internal virtual void SkipKey(int fieldCount) + { + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. 
+ for (int i = 0; i < fieldCount; i++) + { + int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4 + fieldlen; + } + } + + internal virtual Key ParseKey(int fieldCount, out ulong bval) + { + byte[] digest = null; + string ns = null; + string setName = null; + Value userKey = null; + bval = 0; + + for (int i = 0; i < fieldCount; i++) + { + int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int fieldtype = dataBuffer[dataOffset++]; + int size = fieldlen - 1; + + switch (fieldtype) + { + case FieldType.DIGEST_RIPE: + digest = new byte[size]; + Array.Copy(dataBuffer, dataOffset, digest, 0, size); + break; + + case FieldType.NAMESPACE: + ns = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size); + break; + + case FieldType.TABLE: + setName = ByteUtil.Utf8ToString(dataBuffer, dataOffset, size); + break; + + case FieldType.KEY: + int type = dataBuffer[dataOffset++]; + size--; + userKey = ByteUtil.BytesToKeyValue((ParticleType)type, dataBuffer, dataOffset, size); + break; + + case FieldType.BVAL_ARRAY: + bval = (ulong)ByteUtil.LittleBytesToLong(dataBuffer, dataOffset); + break; + } + dataOffset += size; + } + return new Key(ns, digest, setName, userKey); + } + + public long? ParseVersion(int fieldCount) + { + long? version = null; + + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.RECORD_VERSION && size == 7) + { + version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); + } + dataOffset += size; + } + return version; + } + + protected void ParseFields(Txn txn, Key key, bool hasWrite) + { + if (txn == null) + { + SkipFields(fieldCount); + return; + } + + long? 
version = null; + + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.RECORD_VERSION) + { + if (size == 7) + { + version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); + } + else + { + throw new AerospikeException("Record version field has invalid size: " + size); + } + } + dataOffset += size; + } + + if (hasWrite) + { + txn.OnWrite(key, version, resultCode); + } + else + { + txn.OnRead(key, version); + } + } + + protected void ParseTxnDeadline(Txn txn) + { + for (int i = 0; i < fieldCount; i++) + { + int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.MRT_DEADLINE) + { + int deadline = ByteUtil.LittleBytesToInt(dataBuffer, dataOffset); + txn.Deadline = deadline; + } + dataOffset += size; + } + } + + protected void SkipFields(int fieldCount) + { + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. 
+ for (int i = 0; i < fieldCount; i++) + { + int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4 + fieldlen; + } + } + + public static bool BatchInDoubt(bool isWrite, int commandSentCounter) + { + return isWrite && commandSentCounter > 1; + } + + public interface BatchOffsets + { + int Size(); + int Get(int i); + } + + private class BatchOffsetsNative : BatchOffsets + { + private int size; + private int[] offsets; + + public BatchOffsetsNative(BatchNode batch) + { + this.size = batch.offsetsSize; + this.offsets = batch.offsets; + } + + public int Size() + { + return size; + } + + public int Get(int i) + { + return offsets[i]; + } + } + } +} +#pragma warning restore 0618 diff --git a/AerospikeClient/Command/DeleteCommand.cs b/AerospikeClient/Command/DeleteCommand.cs index fc871ec5..bf011137 100644 --- a/AerospikeClient/Command/DeleteCommand.cs +++ b/AerospikeClient/Command/DeleteCommand.cs @@ -1,71 +1,71 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -using System; - -namespace Aerospike.Client -{ - public sealed class DeleteCommand : SyncWriteCommand - { - private bool existed; - - public DeleteCommand(Cluster cluster, WritePolicy writePolicy, Key key) - : base(cluster, writePolicy, key) - { - cluster.AddCommandCount(); - } - - protected internal override void WriteBuffer() - { - SetDelete(writePolicy, key); - } - - protected internal override void ParseResult(IConnection conn) - { - ParseHeader(conn); - - if (resultCode == 0) - { - existed = true; - return; - } - - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) - { - existed = false; - return; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (writePolicy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - existed = true; - return; - } - - throw new AerospikeException(resultCode); - } - - public bool Existed() - { - return existed; - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +using System; + +namespace Aerospike.Client +{ + public sealed class DeleteCommand : SyncWriteCommand + { + private bool existed; + + public DeleteCommand(Cluster cluster, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + } + + protected internal override void WriteBuffer() + { + SetDelete(writePolicy, key); + } + + protected internal override void ParseResult(IConnection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK) + { + existed = true; + return; + } + + if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + existed = false; + return; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (writePolicy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + existed = true; + return; + } + + throw new AerospikeException(resultCode); + } + + public bool Existed() + { + return existed; + } + } +} diff --git a/AerospikeClient/Command/SyncCommand.cs b/AerospikeClient/Command/SyncCommand.cs index 4c426e7f..7d4bfc92 100644 --- a/AerospikeClient/Command/SyncCommand.cs +++ b/AerospikeClient/Command/SyncCommand.cs @@ -1,519 +1,442 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using System; -using System.Net.Sockets; -using System.Runtime.InteropServices; -using static Aerospike.Client.Latency; - -namespace Aerospike.Client -{ - public abstract class SyncCommand : Command - { - protected readonly Cluster cluster; - protected readonly Policy policy; - internal int iteration = 1; - internal int commandSentCounter; - internal DateTime deadline; - protected int resultCode; - protected int generation; - protected int expiration; - protected int fieldCount; - protected int opCount; - - /// - /// Default constructor. - /// - public SyncCommand(Cluster cluster, Policy policy) - : base(policy.socketTimeout, policy.totalTimeout, policy.maxRetries) - { - this.cluster = cluster; - this.policy = policy; - this.deadline = DateTime.MinValue; - } - - /// - /// Scan/Query constructor. - /// - public SyncCommand(Cluster cluster, Policy policy, int socketTimeout, int totalTimeout) - : base(socketTimeout, totalTimeout, 0) - { - this.cluster = cluster; - this.policy = policy; - this.deadline = DateTime.MinValue; - } - - public virtual void Execute() - { - if (totalTimeout > 0) - { - deadline = DateTime.UtcNow.AddMilliseconds(totalTimeout); - } - ExecuteCommand(); - } - - public void ExecuteCommand() - { - Node node; - AerospikeException exception = null; - ValueStopwatch metricsWatch = new(); - LatencyType latencyType = cluster.MetricsEnabled ? GetLatencyType() : LatencyType.NONE; - bool isClientTimeout; - - // Execute command until successful, timed out or maximum iterations have been reached. - while (true) - { - try - { - node = GetNode(); - } - catch (AerospikeException ae) - { - ae.Policy = policy; - ae.Iteration = iteration; - ae.SetInDoubt(IsWrite(), commandSentCounter); - throw; - } - - try - { - node.ValidateErrorCount(); - if (latencyType != LatencyType.NONE) - { - metricsWatch = ValueStopwatch.StartNew(); - } - Connection conn = node.GetConnection(socketTimeout, policy.TimeoutDelay); - - try - { - // Set command buffer. 
- WriteBuffer(); - - // Send command. - conn.Write(dataBuffer, dataOffset); - commandSentCounter++; - - // Parse results. - ParseResult(conn); - - // Put connection back in pool. - node.PutConnection(conn); - - if (latencyType != LatencyType.NONE) - { - node.AddLatency(latencyType, metricsWatch.Elapsed.TotalMilliseconds); - } - - // Command has completed successfully. Exit method. - return; - } - catch (AerospikeException ae) - { - if (ae.KeepConnection()) - { - // Put connection back in pool. - node.PutConnection(conn); - } - else - { - // Close socket to flush out possible garbage. Do not put back in pool. - node.CloseConnectionOnError(conn); - } - - if (ae.Result == ResultCode.TIMEOUT) - { - // Retry on server timeout. - exception = new AerospikeException.Timeout(policy, false); - isClientTimeout = false; - node.IncrErrorRate(); - node.AddTimeout(); - } - else if (ae.Result == ResultCode.DEVICE_OVERLOAD) - { - // Add to circuit breaker error count and retry. - exception = ae; - isClientTimeout = false; - node.IncrErrorRate(); - node.AddError(); - } - else - { - node.AddError(); - throw; - } - } - catch (Connection.ReadTimeout crt) - { - if (policy.TimeoutDelay > 0) - { - cluster.RecoverConnection(new ConnectionRecover(conn, node, policy.TimeoutDelay, crt, IsSingle())); - conn = null; - } - else - { - node.CloseConnection(conn); - } - exception = new AerospikeException.Timeout(policy, true); - isClientTimeout = true; - node.AddTimeout(); - } - catch (SocketException se) - { - // Socket errors are considered temporary anomalies. - // Retry after closing connection. - node.CloseConnectionOnError(conn); - - if (se.SocketErrorCode == SocketError.TimedOut) - { - isClientTimeout = true; - node.AddTimeout(); - } - else - { - exception = new AerospikeException.Connection(se); - isClientTimeout = false; - node.AddError(); - } - } - catch (IOException ioe) - { - // IO errors are considered temporary anomalies. Retry. 
- // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration); - node.CloseConnection(conn); - exception = new AerospikeException.Connection(ioe); - isClientTimeout = false; - node.AddError(); - } - catch (Exception) - { - // All other exceptions are considered fatal. Do not retry. - // Close socket to flush out possible garbage. Do not put back in pool. - node.CloseConnectionOnError(conn); - node.AddError(); - throw; - } - } - catch (SocketException se) - { - // This exception might happen after initial connection succeeded, but - // user login failed with a socket error. Retry. - if (se.SocketErrorCode == SocketError.TimedOut) - { - isClientTimeout = true; - node.AddTimeout(); - } - else - { - exception = new AerospikeException.Connection(se); - isClientTimeout = false; - node.AddError(); - } - } - catch (IOException ioe) - { - // IO errors are considered temporary anomalies. Retry. - // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration); - exception = new AerospikeException.Connection(ioe); - isClientTimeout = false; - node.AddError(); - } - catch (Connection.ReadTimeout) - { - // Connection already handled. - exception = new AerospikeException.Timeout(policy, true); - isClientTimeout = true; - node.AddTimeout(); - } - catch (AerospikeException.Connection ce) - { - // Socket connection error has occurred. Retry. - exception = ce; - isClientTimeout = false; - node.AddError(); - } - catch (AerospikeException.Backoff be) - { - // Node is in backoff state. Retry, hopefully on another node. - exception = be; - isClientTimeout = false; - node.AddError(); - } - catch (AerospikeException ae) - { - ae.Node = node; - ae.Policy = policy; - ae.Iteration = iteration; - ae.SetInDoubt(IsWrite(), commandSentCounter); - node.AddError(); - throw; - } - catch (Exception) - { - node.AddError(); - throw; - } - - // Check maxRetries. 
- if (iteration > maxRetries) - { - break; - } - - if (totalTimeout > 0) - { - // Check for total timeout. - long remaining = (long)deadline.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries; - - if (remaining <= 0) - { - break; - } - - if (remaining < totalTimeout) - { - totalTimeout = (int)remaining; - - if (socketTimeout > totalTimeout) - { - socketTimeout = totalTimeout; - } - } - } - - if (!isClientTimeout && policy.sleepBetweenRetries > 0) - { - // Sleep before trying again. - Util.Sleep(policy.sleepBetweenRetries); - } - - iteration++; - - if (!PrepareRetry(isClientTimeout || exception.Result != ResultCode.SERVER_NOT_AVAILABLE)) - { - // Batch may be retried in separate commands. - if (RetryBatch(cluster, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter)) - { - // Batch was retried in separate commands. Complete this command. - return; - } - } - - cluster.AddRetry(); - } - - // Retries have been exhausted. Throw last exception. - if (isClientTimeout) - { - exception = new AerospikeException.Timeout(policy, true); - } - exception.Node = node; - exception.Policy = policy; - exception.Iteration = iteration; - exception.SetInDoubt(IsWrite(), commandSentCounter); - throw exception; - } - - protected internal sealed override int SizeBuffer() - { - dataBuffer = ThreadLocalData.GetBuffer(); - - if (dataOffset > dataBuffer.Length) - { - dataBuffer = ThreadLocalData.ResizeBuffer(dataOffset); - } - dataOffset = 0; - return dataBuffer.Length; - } - - protected internal void SizeBuffer(int size) - { - if (size > dataBuffer.Length) - { - dataBuffer = ThreadLocalData.ResizeBuffer(size); - } - } - - protected void SkipFields(int fieldCount) - { - // There can be fields in the response (setname etc). - // But for now, ignore them. Expose them to the API if needed in the future. 
- for (int i = 0; i < fieldCount; i++) - { - int fieldlen = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4 + fieldlen; - } - } - - protected internal sealed override void End() - { - // Write total size of message. - ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48); - ByteUtil.LongToBytes(size, dataBuffer, 0); - } - - protected void ParseHeader(IConnection conn) - { - // Read header. - conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); - - long sz = ByteUtil.BytesToLong(dataBuffer, 0); - int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); - - if (receiveSize <= 0) - { - throw new AerospikeException("Invalid receive size: " + receiveSize); - } - - SizeBuffer(receiveSize); - conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); - conn.UpdateLastUsed(); - - ulong type = (ulong)(sz >> 48) & 0xff; - - if (type == Command.AS_MSG_TYPE) - { - dataOffset = 5; - } - else if (type == Command.MSG_TYPE_COMPRESSED) - { - int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); - byte[] ubuf = new byte[usize]; - - ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); - dataBuffer = ubuf; - dataOffset = 13; - } - else - { - throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); - } - - this.resultCode = dataBuffer[dataOffset] & 0xFF; - dataOffset++; - this.generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - this.expiration = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 8; - this.fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - this.opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); - dataOffset += 2; - } - - protected void ParseFields(Txn txn, Key key, bool hasWrite) - { - if (txn == null) - { - SkipFields(fieldCount); - return; - } - - long? 
version = null; - - for (int i = 0; i < fieldCount; i++) - { - int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - - int type = dataBuffer[dataOffset++]; - int size = len - 1; - - if (type == FieldType.RECORD_VERSION) - { - if (size == 7) - { - version = ByteUtil.VersionBytesToLong(dataBuffer, dataOffset); - } - else - { - throw new AerospikeException("Record version field has invalid size: " + size); - } - } - dataOffset += size; - } - - if (hasWrite) - { - txn.OnWrite(key, version, resultCode); - } - else - { - txn.OnRead(key, version); - } - } - - protected void ParseTxnDeadline(Txn txn) - { - for (int i = 0; i < fieldCount; i++) - { - int len = ByteUtil.BytesToInt(dataBuffer, dataOffset); - dataOffset += 4; - - int type = dataBuffer[dataOffset++]; - int size = len - 1; - - if (type == FieldType.MRT_DEADLINE) - { - int deadline = ByteUtil.LittleBytesToInt(dataBuffer, dataOffset); - txn.Deadline = deadline; - } - dataOffset += size; - } - } - - protected internal sealed override void SetLength(int length) - { - dataOffset = length; - } - - // Do nothing by default. Write commands will override this method. - protected internal virtual void OnInDoubt() - { - - } - - protected internal virtual bool RetryBatch - ( - Cluster cluster, - int socketTimeout, - int totalTimeout, - DateTime deadline, - int iteration, - int commandSentCounter - ) - { - // Override this method in batch to regenerate node assignments. - return false; - } - - protected internal virtual bool IsWrite() - { - return false; - } - - protected virtual bool IsSingle() - { - return true; - } - - protected internal abstract Node GetNode(); - - protected abstract LatencyType GetLatencyType(); - protected internal abstract void WriteBuffer(); - protected internal abstract void ParseResult(IConnection conn); - protected internal abstract bool PrepareRetry(bool timeout); - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. 
under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using System; +using System.Net.Sockets; +using System.Runtime.InteropServices; +using static Aerospike.Client.Latency; + +namespace Aerospike.Client +{ + public abstract class SyncCommand : Command + { + protected readonly Cluster cluster; + protected readonly Policy policy; + internal int iteration = 1; + internal int commandSentCounter; + internal DateTime deadline; + + /// + /// Default constructor. + /// + public SyncCommand(Cluster cluster, Policy policy) + : base(policy.socketTimeout, policy.totalTimeout, policy.maxRetries) + { + this.cluster = cluster; + this.policy = policy; + this.deadline = DateTime.MinValue; + } + + /// + /// Scan/Query constructor. + /// + public SyncCommand(Cluster cluster, Policy policy, int socketTimeout, int totalTimeout) + : base(socketTimeout, totalTimeout, 0) + { + this.cluster = cluster; + this.policy = policy; + this.deadline = DateTime.MinValue; + } + + public virtual void Execute() + { + if (totalTimeout > 0) + { + deadline = DateTime.UtcNow.AddMilliseconds(totalTimeout); + } + ExecuteCommand(); + } + + public void ExecuteCommand() + { + Node node; + AerospikeException exception = null; + ValueStopwatch metricsWatch = new(); + LatencyType latencyType = cluster.MetricsEnabled ? 
GetLatencyType() : LatencyType.NONE; + bool isClientTimeout; + + // Execute command until successful, timed out or maximum iterations have been reached. + while (true) + { + try + { + node = GetNode(); + } + catch (AerospikeException ae) + { + ae.Policy = policy; + ae.Iteration = iteration; + ae.SetInDoubt(IsWrite(), commandSentCounter); + throw; + } + + try + { + node.ValidateErrorCount(); + if (latencyType != LatencyType.NONE) + { + metricsWatch = ValueStopwatch.StartNew(); + } + Connection conn = node.GetConnection(socketTimeout, policy.TimeoutDelay); + + try + { + // Set command buffer. + WriteBuffer(); + + // Send command. + conn.Write(dataBuffer, dataOffset); + commandSentCounter++; + + // Parse results. + ParseResult(conn); + + // Put connection back in pool. + node.PutConnection(conn); + + if (latencyType != LatencyType.NONE) + { + node.AddLatency(latencyType, metricsWatch.Elapsed.TotalMilliseconds); + } + + // Command has completed successfully. Exit method. + return; + } + catch (AerospikeException ae) + { + if (ae.KeepConnection()) + { + // Put connection back in pool. + node.PutConnection(conn); + } + else + { + // Close socket to flush out possible garbage. Do not put back in pool. + node.CloseConnectionOnError(conn); + } + + if (ae.Result == ResultCode.TIMEOUT) + { + // Retry on server timeout. + exception = new AerospikeException.Timeout(policy, false); + isClientTimeout = false; + node.IncrErrorRate(); + node.AddTimeout(); + } + else if (ae.Result == ResultCode.DEVICE_OVERLOAD) + { + // Add to circuit breaker error count and retry. 
+ exception = ae; + isClientTimeout = false; + node.IncrErrorRate(); + node.AddError(); + } + else + { + node.AddError(); + throw; + } + } + catch (Connection.ReadTimeout crt) + { + if (policy.TimeoutDelay > 0) + { + cluster.RecoverConnection(new ConnectionRecover(conn, node, policy.TimeoutDelay, crt, IsSingle())); + conn = null; + } + else + { + node.CloseConnection(conn); + } + exception = new AerospikeException.Timeout(policy, true); + isClientTimeout = true; + node.AddTimeout(); + } + catch (SocketException se) + { + // Socket errors are considered temporary anomalies. + // Retry after closing connection. + node.CloseConnectionOnError(conn); + + if (se.SocketErrorCode == SocketError.TimedOut) + { + isClientTimeout = true; + node.AddTimeout(); + } + else + { + exception = new AerospikeException.Connection(se); + isClientTimeout = false; + node.AddError(); + } + } + catch (IOException ioe) + { + // IO errors are considered temporary anomalies. Retry. + // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration); + node.CloseConnection(conn); + exception = new AerospikeException.Connection(ioe); + isClientTimeout = false; + node.AddError(); + } + catch (Exception) + { + // All other exceptions are considered fatal. Do not retry. + // Close socket to flush out possible garbage. Do not put back in pool. + node.CloseConnectionOnError(conn); + node.AddError(); + throw; + } + } + catch (SocketException se) + { + // This exception might happen after initial connection succeeded, but + // user login failed with a socket error. Retry. + if (se.SocketErrorCode == SocketError.TimedOut) + { + isClientTimeout = true; + node.AddTimeout(); + } + else + { + exception = new AerospikeException.Connection(se); + isClientTimeout = false; + node.AddError(); + } + } + catch (IOException ioe) + { + // IO errors are considered temporary anomalies. Retry. 
+ // Log.info("IOException: " + tranId + ',' + node + ',' + sequence + ',' + iteration); + exception = new AerospikeException.Connection(ioe); + isClientTimeout = false; + node.AddError(); + } + catch (Connection.ReadTimeout) + { + // Connection already handled. + exception = new AerospikeException.Timeout(policy, true); + isClientTimeout = true; + node.AddTimeout(); + } + catch (AerospikeException.Connection ce) + { + // Socket connection error has occurred. Retry. + exception = ce; + isClientTimeout = false; + node.AddError(); + } + catch (AerospikeException.Backoff be) + { + // Node is in backoff state. Retry, hopefully on another node. + exception = be; + isClientTimeout = false; + node.AddError(); + } + catch (AerospikeException ae) + { + ae.Node = node; + ae.Policy = policy; + ae.Iteration = iteration; + ae.SetInDoubt(IsWrite(), commandSentCounter); + node.AddError(); + throw; + } + catch (Exception) + { + node.AddError(); + throw; + } + + // Check maxRetries. + if (iteration > maxRetries) + { + break; + } + + if (totalTimeout > 0) + { + // Check for total timeout. + long remaining = (long)deadline.Subtract(DateTime.UtcNow).TotalMilliseconds - policy.sleepBetweenRetries; + + if (remaining <= 0) + { + break; + } + + if (remaining < totalTimeout) + { + totalTimeout = (int)remaining; + + if (socketTimeout > totalTimeout) + { + socketTimeout = totalTimeout; + } + } + } + + if (!isClientTimeout && policy.sleepBetweenRetries > 0) + { + // Sleep before trying again. + Util.Sleep(policy.sleepBetweenRetries); + } + + iteration++; + + if (!PrepareRetry(isClientTimeout || exception.Result != ResultCode.SERVER_NOT_AVAILABLE)) + { + // Batch may be retried in separate commands. + if (RetryBatch(cluster, socketTimeout, totalTimeout, deadline, iteration, commandSentCounter)) + { + // Batch was retried in separate commands. Complete this command. + return; + } + } + + cluster.AddRetry(); + } + + // Retries have been exhausted. Throw last exception. 
+ if (isClientTimeout) + { + exception = new AerospikeException.Timeout(policy, true); + } + exception.Node = node; + exception.Policy = policy; + exception.Iteration = iteration; + exception.SetInDoubt(IsWrite(), commandSentCounter); + throw exception; + } + + protected internal sealed override int SizeBuffer() + { + dataBuffer = ThreadLocalData.GetBuffer(); + + if (dataOffset > dataBuffer.Length) + { + dataBuffer = ThreadLocalData.ResizeBuffer(dataOffset); + } + dataOffset = 0; + return dataBuffer.Length; + } + + protected internal void SizeBuffer(int size) + { + if (size > dataBuffer.Length) + { + dataBuffer = ThreadLocalData.ResizeBuffer(size); + } + } + + protected internal sealed override void End() + { + // Write total size of message. + ulong size = ((ulong)dataOffset - 8) | (CL_MSG_VERSION << 56) | (AS_MSG_TYPE << 48); + ByteUtil.LongToBytes(size, dataBuffer, 0); + } + + protected void ParseHeader(IConnection conn) + { + // Read header. + conn.ReadFully(dataBuffer, 8, Command.STATE_READ_HEADER); + + long sz = ByteUtil.BytesToLong(dataBuffer, 0); + int receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); + + if (receiveSize <= 0) + { + throw new AerospikeException("Invalid receive size: " + receiveSize); + } + + SizeBuffer(receiveSize); + conn.ReadFully(dataBuffer, receiveSize, Command.STATE_READ_DETAIL); + conn.UpdateLastUsed(); + + ulong type = (ulong)(sz >> 48) & 0xff; + + if (type == Command.AS_MSG_TYPE) + { + dataOffset = 5; + } + else if (type == Command.MSG_TYPE_COMPRESSED) + { + int usize = (int)ByteUtil.BytesToLong(dataBuffer, 0); + byte[] ubuf = new byte[usize]; + + ByteUtil.Decompress(dataBuffer, 8, receiveSize, ubuf, usize); + dataBuffer = ubuf; + dataOffset = 13; + } + else + { + throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); + } + + this.resultCode = dataBuffer[dataOffset] & 0xFF; + dataOffset++; + this.generation = ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + this.expiration = 
ByteUtil.BytesToInt(dataBuffer, dataOffset); + dataOffset += 8; + this.fieldCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + this.opCount = ByteUtil.BytesToShort(dataBuffer, dataOffset); + dataOffset += 2; + } + + protected internal sealed override void SetLength(int length) + { + dataOffset = length; + } + + // Do nothing by default. Write commands will override this method. + protected internal virtual void OnInDoubt() + { + + } + + protected internal virtual bool RetryBatch + ( + Cluster cluster, + int socketTimeout, + int totalTimeout, + DateTime deadline, + int iteration, + int commandSentCounter + ) + { + // Override this method in batch to regenerate node assignments. + return false; + } + + protected internal virtual bool IsWrite() + { + return false; + } + + protected virtual bool IsSingle() + { + return true; + } + + protected internal abstract Node GetNode(); + + protected abstract LatencyType GetLatencyType(); + protected internal abstract void WriteBuffer(); + protected internal abstract void ParseResult(IConnection conn); + protected internal abstract bool PrepareRetry(bool timeout); + } +} diff --git a/AerospikeClient/Command/TouchCommand.cs b/AerospikeClient/Command/TouchCommand.cs index d90a4632..427e81e3 100644 --- a/AerospikeClient/Command/TouchCommand.cs +++ b/AerospikeClient/Command/TouchCommand.cs @@ -1,53 +1,54 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -namespace Aerospike.Client -{ - public sealed class TouchCommand : SyncWriteCommand - { - public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key) - : base(cluster, writePolicy, key) - { - } - - protected internal override void WriteBuffer() - { - SetTouch(writePolicy, key); - } - - protected internal override void ParseResult(IConnection conn) - { - ParseHeader(conn); - - if (resultCode == ResultCode.OK) - { - return; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (writePolicy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - return; - } - - throw new AerospikeException(resultCode); - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class TouchCommand : SyncWriteCommand + { + public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + } + + protected internal override void WriteBuffer() + { + SetTouch(writePolicy, key); + } + + protected internal override void ParseResult(IConnection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK) + { + return; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (writePolicy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return; + } + + throw new AerospikeException(resultCode); + } + } +} diff --git a/AerospikeClient/Command/TxnClose.cs b/AerospikeClient/Command/TxnClose.cs index 6b3fe880..84682bed 100644 --- a/AerospikeClient/Command/TxnClose.cs +++ b/AerospikeClient/Command/TxnClose.cs @@ -1,51 +1,52 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public sealed class TxnClose : SyncWriteCommand - { - private readonly Txn txn; - - public TxnClose(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key) - : base(cluster, writePolicy, key) - { - this.txn = txn; - } - - protected internal override void WriteBuffer() - { - SetTxnClose(txn, key); - } - - protected internal override void ParseResult(IConnection conn) - { - ParseHeader(conn); - - if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) - { - return; - } - - throw new AerospikeException(resultCode); - } - - protected internal override void OnInDoubt() - { - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class TxnClose : SyncWriteCommand + { + private readonly Txn txn; + + public TxnClose(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + this.txn = txn; + } + + protected internal override void WriteBuffer() + { + SetTxnClose(txn, key); + } + + protected internal override void ParseResult(IConnection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) + { + return; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + } +} diff --git a/AerospikeClient/Command/TxnMarkRollForward.cs b/AerospikeClient/Command/TxnMarkRollForward.cs index e478f9f4..01e9e347 100644 --- a/AerospikeClient/Command/TxnMarkRollForward.cs +++ b/AerospikeClient/Command/TxnMarkRollForward.cs @@ -1,53 +1,51 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public sealed class TxnMarkRollForward : SyncWriteCommand - { - private readonly Txn txn; - - public TxnMarkRollForward(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key) - : base(cluster, writePolicy, key) - { - this.txn = txn; - } - - protected internal override void WriteBuffer() - { - SetTxnMarkRollForward(txn, key); - } - - protected internal override void ParseResult(IConnection conn) - { - ParseHeader(conn); - - // BIN_EXISTS_ERROR is considered a success because it means a previous attempt already - // succeeded in notifying the server that the MRT will be rolled forward. - if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) - { - return; - } - - throw new AerospikeException(resultCode); - } - - protected internal override void OnInDoubt() - { - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class TxnMarkRollForward : SyncWriteCommand + { + public TxnMarkRollForward(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key) + : base(cluster, writePolicy, key) + { + } + + protected internal override void WriteBuffer() + { + SetTxnMarkRollForward(key); + } + + protected internal override void ParseResult(IConnection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + // BIN_EXISTS_ERROR is considered a success because it means a previous attempt already + // succeeded in notifying the server that the MRT will be rolled forward. + if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) + { + return; + } + + throw new AerospikeException(resultCode); + } + + protected internal override void OnInDoubt() + { + } + } +} diff --git a/AerospikeClient/Command/TxnRoll.cs b/AerospikeClient/Command/TxnRoll.cs index 0dd8c3e7..ca6aeed7 100644 --- a/AerospikeClient/Command/TxnRoll.cs +++ b/AerospikeClient/Command/TxnRoll.cs @@ -1,261 +1,271 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -using static Aerospike.Client.CommitStatus; -using static Aerospike.Client.CommitError; -using static Aerospike.Client.AbortStatus; - -namespace Aerospike.Client -{ - public sealed class TxnRoll - { - private readonly Cluster cluster; - private readonly Txn txn; - private BatchRecord[] verifyRecords; - private BatchRecord[] rollRecords; - - public TxnRoll(Cluster cluster, Txn txn) - { - this.cluster = cluster; - this.txn = txn; - } - - public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) - { - WritePolicy writePolicy; - Key txnKey; - - try - { - // Verify read versions in batch. - Verify(verifyPolicy); - } - catch (Exception t) - { - // Verify failed. Abort. - try - { - Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); - } - catch (Exception t2) - { - // Throw combination of verify and roll exceptions. - //t.InnerException = t2; //TODO: Ask about this - throw new AerospikeException.Commit(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, verifyRecords, rollRecords, t); - } - - if (txn.MonitorMightExist()) - { - try - { - writePolicy = new WritePolicy(rollPolicy); - txnKey = TxnMonitor.GetTxnMonitorKey(txn); - Close(writePolicy, txnKey); - } - catch (Exception t3) - { - // Throw combination of verify and close exceptions. - //t.AddSuppressed(t3); - throw new AerospikeException.Commit(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, verifyRecords, rollRecords, t); - } - } - - // Throw original exception when abort succeeds. - throw new AerospikeException.Commit(CommitErrorType.VERIFY_FAIL, verifyRecords, rollRecords, t); - } - - writePolicy = new WritePolicy(rollPolicy); - txnKey = TxnMonitor.GetTxnMonitorKey(txn); - - if (txn.MonitorExists()) - { - // Tell MRT monitor that a roll-forward will commence. - try - { - MarkRollForward(writePolicy, txnKey); - } - catch (Exception t) - { - throw new AerospikeException.Commit(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, verifyRecords, rollRecords, t); - } - - // Roll-forward writes in batch. 
- try - { - Roll(rollPolicy, Command.INFO4_MRT_ROLL_FORWARD); - } - catch (Exception t) - { - return CommitStatusType.ROLL_FORWARD_ABANDONED; - } - } - - if (txn.MonitorMightExist()) - { - // Remove MRT monitor. - try - { - Close(writePolicy, txnKey); - } - catch (Exception t) - { - return CommitStatusType.CLOSE_ABANDONED; - } - } - - return CommitStatusType.OK; - } - - public AbortStatusType Abort(BatchPolicy rollPolicy) - { - try - { - Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); - } - catch (Exception) - { - return AbortStatusType.ROLL_BACK_ABANDONED; - } - - if (txn.MonitorMightExist()) - { - try - { - WritePolicy writePolicy = new(rollPolicy); - Key txnKey = TxnMonitor.GetTxnMonitorKey(txn); - Close(writePolicy, txnKey); - } - catch (Exception t) - { - return AbortStatusType.CLOSE_ABANDONED; - } - } - - return AbortStatusType.OK; - } - private void Verify(BatchPolicy verifyPolicy) - { - // Validate record versions in a batch. - HashSet> reads = txn.Reads.ToHashSet>(); - int max = reads.Count; - if (max == 0) - { - return; - } - - BatchRecord[] records = new BatchRecord[max]; - Key[] keys = new Key[max]; - long[] versions = new long[max]; - int count = 0; - - foreach (KeyValuePair entry in reads) - { - Key key = entry.Key; - keys[count] = key; - records[count] = new BatchRecord(key, false); - versions[count] = entry.Value; - count++; - } - this.verifyRecords = records; - - BatchStatus status = new(true); - List bns = BatchNode.GenerateList(cluster, verifyPolicy, keys, records, false, status); - BatchCommand[] commands = new BatchCommand[bns.Count]; - - count = 0; - - foreach (BatchNode bn in bns) - { - commands[count++] = new BatchTxnVerify( - cluster, bn, verifyPolicy, txn, keys, versions, records, status); - } - - BatchExecutor.Execute(cluster, verifyPolicy, commands, status); - - if (!status.GetStatus()) - { - throw new AerospikeException("Failed to verify one or more record versions"); - } - } - - private void MarkRollForward(WritePolicy writePolicy, Key 
txnKey) - { - // Tell MRT monitor that a roll-forward will commence. - TxnMarkRollForward cmd = new(cluster, txn, writePolicy, txnKey); - cmd.Execute(); - } - - private void Roll(BatchPolicy rollPolicy, int txnAttr) - { - HashSet keySet = txn.Writes; - - if (keySet.Count == 0) - { - return; - } - - Key[] keys = keySet.ToArray(); - BatchRecord[] records = new BatchRecord[keys.Length]; - - for (int i = 0; i < keys.Length; i++) - { - records[i] = new BatchRecord(keys[i], true); - } - - this.rollRecords = records; - - // Copy transaction roll policy because it needs to be modified. - BatchPolicy batchPolicy = new(rollPolicy); - - BatchAttr attr = new(); - attr.SetTxn(txnAttr); - BatchStatus status = new(true); - - // generate() requires a null transaction instance. - List bns = BatchNode.GenerateList(cluster, batchPolicy, keys, records, true, status); - BatchCommand[] commands = new BatchCommand[bns.Count]; - - // Batch roll forward requires the transaction instance. - batchPolicy.Txn = txn; - - int count = 0; - - foreach (BatchNode bn in bns) - { - commands[count++] = new BatchTxnRoll( - cluster, bn, batchPolicy, keys, records, attr, status); - } - BatchExecutor.Execute(cluster, batchPolicy, commands, status); - - if (!status.GetStatus()) - { - string rollString = txnAttr == Command.INFO4_MRT_ROLL_FORWARD ? "commit" : "abort"; - throw new AerospikeException("Failed to " + rollString + " one or more records"); - } - } - - private void Close(WritePolicy writePolicy, Key txnKey) - { - // Delete MRT monitor on server. - TxnClose cmd = new(cluster, txn, writePolicy, txnKey); - cmd.Execute(); - - // Reset MRT on client. - txn.Clear(); - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +using static Aerospike.Client.CommitStatus; +using static Aerospike.Client.CommitError; +using static Aerospike.Client.AbortStatus; + +namespace Aerospike.Client +{ + public sealed class TxnRoll + { + private readonly Cluster cluster; + private readonly Txn txn; + private BatchRecord[] verifyRecords; + private BatchRecord[] rollRecords; + + public TxnRoll(Cluster cluster, Txn txn) + { + this.cluster = cluster; + this.txn = txn; + } + + public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) + { + WritePolicy writePolicy; + Key txnKey; + + try + { + // Verify read versions in batch. + Verify(verifyPolicy); + } + catch (Exception e) + { + // Verify failed. Abort. + try + { + Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); + } + catch (Exception e2) + { + // Throw combination of verify and roll exceptions. + + throw OnCommitError(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, e, false); + } + + if (txn.MonitorMightExist()) + { + try + { + writePolicy = new WritePolicy(rollPolicy); + txnKey = TxnMonitor.GetTxnMonitorKey(txn); + Close(writePolicy, txnKey); + } + catch (Exception e3) + { + // Throw combination of verify and close exceptions. + //t.AddSuppressed(t3); + throw OnCommitError(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e, false); + } + } + + // Throw original exception when abort succeeds. 
+ throw OnCommitError(CommitErrorType.VERIFY_FAIL, e, false); + } + + writePolicy = new WritePolicy(rollPolicy); + txnKey = TxnMonitor.GetTxnMonitorKey(txn); + + if (txn.MonitorExists()) + { + // Tell MRT monitor that a roll-forward will commence. + try + { + MarkRollForward(writePolicy, txnKey); + } + catch (Exception e) + { + throw OnCommitError(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, e, true); + } + + // Roll-forward writes in batch. + try + { + Roll(rollPolicy, Command.INFO4_MRT_ROLL_FORWARD); + } + catch (Exception) + { + return CommitStatusType.ROLL_FORWARD_ABANDONED; + } + } + + if (txn.MonitorMightExist()) + { + // Remove MRT monitor. + try + { + Close(writePolicy, txnKey); + } + catch (Exception) + { + return CommitStatusType.CLOSE_ABANDONED; + } + } + + return CommitStatusType.OK; + } + + private AerospikeException.Commit OnCommitError(CommitErrorType error, Exception cause, bool setInDoubt) + { + AerospikeException.Commit aec = new(error, verifyRecords, rollRecords, cause); + + if (cause is AerospikeException) { + AerospikeException src = (AerospikeException)cause; + aec.Node = src.Node; + aec.Policy = src.Policy; + aec.Iteration = src.Iteration; + if (setInDoubt) { + aec.SetInDoubt(src.InDoubt); + } + } + return aec; + } + + public AbortStatusType Abort(BatchPolicy rollPolicy) + { + try + { + Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); + } + catch (Exception) + { + return AbortStatusType.ROLL_BACK_ABANDONED; + } + + if (txn.MonitorMightExist()) + { + try + { + WritePolicy writePolicy = new(rollPolicy); + Key txnKey = TxnMonitor.GetTxnMonitorKey(txn); + Close(writePolicy, txnKey); + } + catch (Exception) + { + return AbortStatusType.CLOSE_ABANDONED; + } + } + + return AbortStatusType.OK; + } + private void Verify(BatchPolicy verifyPolicy) + { + // Validate record versions in a batch. 
+ HashSet> reads = txn.Reads.ToHashSet>(); + int max = reads.Count; + if (max == 0) + { + return; + } + + BatchRecord[] records = new BatchRecord[max]; + Key[] keys = new Key[max]; + long?[] versions = new long?[max]; + int count = 0; + + foreach (KeyValuePair entry in reads) + { + Key key = entry.Key; + keys[count] = key; + records[count] = new BatchRecord(key, false); + versions[count] = entry.Value; + count++; + } + this.verifyRecords = records; + + BatchStatus status = new(true); + List bns = BatchNode.GenerateList(cluster, verifyPolicy, keys, records, false, status); + BatchCommand[] commands = new BatchCommand[bns.Count]; + + count = 0; + + foreach (BatchNode bn in bns) + { + commands[count++] = new BatchTxnVerify( + cluster, bn, verifyPolicy, keys, versions, records, status); + } + + BatchExecutor.Execute(cluster, verifyPolicy, commands, status); + + if (!status.GetStatus()) + { + throw new AerospikeException("Failed to verify one or more record versions"); + } + } + + private void MarkRollForward(WritePolicy writePolicy, Key txnKey) + { + // Tell MRT monitor that a roll-forward will commence. + TxnMarkRollForward cmd = new(cluster, txn, writePolicy, txnKey); + cmd.Execute(); + } + + private void Roll(BatchPolicy rollPolicy, int txnAttr) + { + HashSet keySet = txn.Writes; + + if (keySet.Count == 0) + { + return; + } + + Key[] keys = keySet.ToArray(); + BatchRecord[] records = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + records[i] = new BatchRecord(keys[i], true); + } + + this.rollRecords = records; + + BatchAttr attr = new(); + attr.SetTxn(txnAttr); + BatchStatus status = new(true); + + // generate() requires a null transaction instance. 
+ List bns = BatchNode.GenerateList(cluster, rollPolicy, keys, records, true, status); + BatchCommand[] commands = new BatchCommand[bns.Count]; + + int count = 0; + + foreach (BatchNode bn in bns) + { + commands[count++] = new BatchTxnRoll( + cluster, bn, rollPolicy, txn, keys, records, attr, status); + } + BatchExecutor.Execute(cluster, rollPolicy, commands, status); + + if (!status.GetStatus()) + { + string rollString = txnAttr == Command.INFO4_MRT_ROLL_FORWARD ? "commit" : "abort"; + throw new AerospikeException("Failed to " + rollString + " one or more records"); + } + } + + private void Close(WritePolicy writePolicy, Key txnKey) + { + // Delete MRT monitor on server. + TxnClose cmd = new(cluster, txn, writePolicy, txnKey); + cmd.Execute(); + + // Reset MRT on client. + txn.Clear(); + } + } +} diff --git a/AerospikeClient/Command/WriteCommand.cs b/AerospikeClient/Command/WriteCommand.cs index 3786edb5..c3c4ace5 100644 --- a/AerospikeClient/Command/WriteCommand.cs +++ b/AerospikeClient/Command/WriteCommand.cs @@ -1,58 +1,59 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -namespace Aerospike.Client -{ - public sealed class WriteCommand : SyncWriteCommand - { - private readonly Bin[] bins; - private readonly Operation.Type operation; - - public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bins, Operation.Type operation) - : base(cluster, writePolicy, key) - { - this.bins = bins; - this.operation = operation; - } - - protected internal override void WriteBuffer() - { - SetWrite(writePolicy, operation, key, bins); - } - - protected internal override void ParseResult(IConnection conn) - { - ParseHeader(conn); - - if (resultCode == ResultCode.OK) - { - return; - } - - if (resultCode == ResultCode.FILTERED_OUT) - { - if (writePolicy.failOnFilteredOut) - { - throw new AerospikeException(resultCode); - } - return; - } - - throw new AerospikeException(resultCode); - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public sealed class WriteCommand : SyncWriteCommand + { + private readonly Bin[] bins; + private readonly Operation.Type operation; + + public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bins, Operation.Type operation) + : base(cluster, writePolicy, key) + { + this.bins = bins; + this.operation = operation; + } + + protected internal override void WriteBuffer() + { + SetWrite(writePolicy, operation, key, bins); + } + + protected internal override void ParseResult(IConnection conn) + { + ParseHeader(conn); + ParseFields(policy.Txn, key, true); + + if (resultCode == ResultCode.OK) + { + return; + } + + if (resultCode == ResultCode.FILTERED_OUT) + { + if (writePolicy.failOnFilteredOut) + { + throw new AerospikeException(resultCode); + } + return; + } + + throw new AerospikeException(resultCode); + } + } +} diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index 64ad53fd..acf7e9f9 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -1,2676 +1,2676 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using System.Reflection; -using System.Text; - -namespace Aerospike.Client -{ - /// - /// Instantiate an AerospikeClient object to access an Aerospike - /// database cluster and perform database operations. - /// - /// This client is thread-safe. One client instance should be used per cluster. - /// Multiple threads should share this cluster instance. - /// - /// - /// Your application uses this class API to perform database operations such as - /// writing and reading records, and selecting sets of records. Write operations - /// include specialized functionality such as append/prepend and arithmetic - /// addition. - /// - /// - /// Each record may have multiple bins, unless the Aerospike server nodes are - /// configured as "single-bin". In "multi-bin" mode, partial records may be - /// written or read by specifying the relevant subset of bins. - /// - /// - public class AerospikeClient : IDisposable, IAerospikeClient - { - //------------------------------------------------------- - // Member variables. - //------------------------------------------------------- - - protected internal Cluster cluster; - - /// - /// Default read policy that is used when read command policy is null. - /// - public Policy readPolicyDefault; - - /// - /// Default write policy that is used when write command policy is null. - /// - public WritePolicy writePolicyDefault; - - /// - /// Default scan policy that is used when scan command policy is null. - /// - public ScanPolicy scanPolicyDefault; - - /// - /// Default query policy that is used when query command policy is null. - /// - public QueryPolicy queryPolicyDefault; - - /// - /// Default parent policy used in batch read commands. Parent policy fields - /// include socketTimeout, totalTimeout, maxRetries, etc... - /// - public BatchPolicy batchPolicyDefault; - - /// - /// Default parent policy used in batch write commands. Parent policy fields - /// include socketTimeout, totalTimeout, maxRetries, etc... 
- /// - public BatchPolicy batchParentPolicyWriteDefault; - - /// - /// Default write policy used in batch operate commands. - /// Write policy fields include generation, expiration, durableDelete, etc... - /// - public BatchWritePolicy batchWritePolicyDefault; - - /// - /// Default delete policy used in batch delete commands. - /// - public BatchDeletePolicy batchDeletePolicyDefault; - - /// - /// Default user defined function policy used in batch UDF excecute commands. - /// - public BatchUDFPolicy batchUDFPolicyDefault; - - /// - /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. - /// - public TxnVerifyPolicy txnVerifyPolicyDefault; - - /// - /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) - /// or back(abort) in a batch. - /// - public TxnRollPolicy txnRollPolicyDefault; - - /// - /// Default info policy that is used when info command policy is null. - /// - public InfoPolicy infoPolicyDefault; - - protected WritePolicy operatePolicyReadDefault; - - //------------------------------------------------------- - // Constructors - //------------------------------------------------------- - - /// - /// Initialize Aerospike client. - /// If the host connection succeeds, the client will: - /// - /// Add host to the cluster map - /// Request host's list of other nodes in cluster - /// Add these nodes to cluster map - /// - /// - /// If the connection succeeds, the client is ready to process database requests. - /// If the connection fails, the cluster will remain in a disconnected state - /// until the server is activated. - /// - /// - /// host name - /// host port - /// if host connection fails - public AerospikeClient(string hostname, int port) - : this(new ClientPolicy(), new Host(hostname, port)) - { - } - - /// - /// Initialize Aerospike client. - /// The client policy is used to set defaults and size internal data structures. 
- /// If the host connection succeeds, the client will: - /// - /// Add host to the cluster map - /// Request host's list of other nodes in cluster - /// Add these nodes to cluster map - /// - /// - /// If the connection succeeds, the client is ready to process database requests. - /// If the connection fails and the policy's failOnInvalidHosts is true, a connection - /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state - /// until the server is activated. - /// - /// - /// client configuration parameters, pass in null for defaults - /// host name - /// host port - /// if host connection fails - public AerospikeClient(ClientPolicy policy, string hostname, int port) - : this(policy, new Host(hostname, port)) - { - } - - /// - /// Initialize Aerospike client with suitable hosts to seed the cluster map. - /// The client policy is used to set defaults and size internal data structures. - /// For the first host connection that succeeds, the client will: - /// - /// Add host to the cluster map - /// Request host's list of other nodes in cluster - /// Add these nodes to cluster map - /// - /// - /// In most cases, only one host is necessary to seed the cluster. The remaining hosts - /// are added as future seeds in case of a complete network failure. - /// - /// - /// If one connection succeeds, the client is ready to process database requests. - /// If all connections fail and the policy's failIfNotConnected is true, a connection - /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state - /// until the server is activated. 
- /// - /// - /// client configuration parameters, pass in null for defaults - /// array of potential hosts to seed the cluster - /// if all host connections fail - public AerospikeClient(ClientPolicy policy, params Host[] hosts) - { - if (policy == null) - { - policy = new ClientPolicy(); - } - this.readPolicyDefault = policy.readPolicyDefault; - this.writePolicyDefault = policy.writePolicyDefault; - this.scanPolicyDefault = policy.scanPolicyDefault; - this.queryPolicyDefault = policy.queryPolicyDefault; - this.batchPolicyDefault = policy.batchPolicyDefault; - this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault; - this.batchWritePolicyDefault = policy.batchWritePolicyDefault; - this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; - this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; - this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; - this.txnRollPolicyDefault = policy.txnRollPolicyDefault; - this.infoPolicyDefault = policy.infoPolicyDefault; - this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); - - cluster = new Cluster(policy, hosts); - cluster.InitTendThread(policy.failIfNotConnected); - } - - /// - /// Construct client without initialization. - /// Should only be used by classes inheriting from this client. 
- /// - protected internal AerospikeClient(ClientPolicy policy) - { - if (policy != null) - { - this.readPolicyDefault = policy.readPolicyDefault; - this.writePolicyDefault = policy.writePolicyDefault; - this.scanPolicyDefault = policy.scanPolicyDefault; - this.queryPolicyDefault = policy.queryPolicyDefault; - this.batchPolicyDefault = policy.batchPolicyDefault; - this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault; - this.batchWritePolicyDefault = policy.batchWritePolicyDefault; - this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; - this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; - this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; - this.txnRollPolicyDefault = policy.txnRollPolicyDefault; - this.infoPolicyDefault = policy.infoPolicyDefault; - this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); - } - else - { - this.readPolicyDefault = new Policy(); - this.writePolicyDefault = new WritePolicy(); - this.scanPolicyDefault = new ScanPolicy(); - this.queryPolicyDefault = new QueryPolicy(); - this.batchPolicyDefault = new BatchPolicy(); - this.batchParentPolicyWriteDefault = BatchPolicy.WriteDefault(); - this.batchWritePolicyDefault = new BatchWritePolicy(); - this.batchDeletePolicyDefault = new BatchDeletePolicy(); - this.batchUDFPolicyDefault = new BatchUDFPolicy(); - this.txnVerifyPolicyDefault = new TxnVerifyPolicy(); - this.txnRollPolicyDefault= new TxnRollPolicy(); - this.infoPolicyDefault = new InfoPolicy(); - } - this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); - } - - //------------------------------------------------------- - // Operations policies - //------------------------------------------------------- - - /// - /// Default read policy that is used when read command policy is null. 
- /// - public Policy ReadPolicyDefault - { - get { return new Policy(readPolicyDefault); } - set { readPolicyDefault = value; } - } - - /// - /// Default write policy that is used when write command policy is null. - /// - public WritePolicy WritePolicyDefault - { - get { return new WritePolicy(writePolicyDefault); } - set { writePolicyDefault = value; } - } - - /// - /// Default scan policy that is used when scan command policy is null. - /// - public ScanPolicy ScanPolicyDefault - { - get { return new ScanPolicy(scanPolicyDefault); } - set { scanPolicyDefault = value; } - } - - /// - /// Default query policy that is used when query command policy is null. - /// - public QueryPolicy QueryPolicyDefault - { - get { return new QueryPolicy(queryPolicyDefault); } - set { queryPolicyDefault = value; } - } - - /// - /// Default parent policy used in batch read commands.Parent policy fields - /// include socketTimeout, totalTimeout, maxRetries, etc... - /// - public BatchPolicy BatchPolicyDefault - { - get { return new BatchPolicy(batchPolicyDefault); } - set { batchPolicyDefault = value; } - } - - /// - /// Default parent policy used in batch write commands. Parent policy fields - /// include socketTimeout, totalTimeout, maxRetries, etc... - /// - public BatchPolicy BatchParentPolicyWriteDefault - { - get { return new BatchPolicy(batchParentPolicyWriteDefault); } - set { batchParentPolicyWriteDefault = value; } - } - - /// - /// Default write policy used in batch operate commands. - /// Write policy fields include generation, expiration, durableDelete, etc... - /// - public BatchWritePolicy BatchWritePolicyDefault - { - get { return new BatchWritePolicy(batchWritePolicyDefault); } - set { batchWritePolicyDefault = value; } - } - - /// - /// Default delete policy used in batch delete commands. 
- /// - public BatchDeletePolicy BatchDeletePolicyDefault - { - get { return new BatchDeletePolicy(batchDeletePolicyDefault); } - set { batchDeletePolicyDefault = value; } - } - - /// - /// Default user defined function policy used in batch UDF excecute commands. - /// - public BatchUDFPolicy BatchUDFPolicyDefault - { - get { return new BatchUDFPolicy(batchUDFPolicyDefault); } - set { batchUDFPolicyDefault = value; } - } - - /// - /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. - /// - public TxnVerifyPolicy TxnVerifyPolicyDefault - { - get { return new TxnVerifyPolicy(txnVerifyPolicyDefault); } - set { txnVerifyPolicyDefault = value; } - } - - /// - /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) - /// or back(abort) in a batch. - /// - public TxnRollPolicy TxnRollPolicyDefault - { - get { return new TxnRollPolicy(txnRollPolicyDefault); } - set { txnRollPolicyDefault = value; } - } - - /// - /// Default info policy that is used when info command policy is null. - /// - public InfoPolicy InfoPolicyDefault - { - get { return infoPolicyDefault; } - set { infoPolicyDefault = value; } - } - - //------------------------------------------------------- - // Cluster Connection Management - //------------------------------------------------------- - - public bool Disposed { get; private set; } - private void Dispose(bool disposing) - { - if (!Disposed) - { - if (disposing) - { - this.Close(); - } - - Disposed = true; - } - } - - /// - /// Close all client connections to database server nodes. - /// - public void Dispose() - { - // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method - Dispose(disposing: true); - GC.SuppressFinalize(this); - } - - /// - /// Close all client connections to database server nodes. 
- /// - public void Close() - { - cluster.Close(); - } - - /// - /// Return if we are ready to talk to the database server cluster. - /// - public bool Connected - { - get - { - return cluster.Connected; - } - } - - /// - /// Cluster associated with this AerospikeClient instance. - /// - public Cluster Cluster - { - get - { - return cluster; - } - } - - /// - /// Return array of active server nodes in the cluster. - /// - public Node[] Nodes - { - get - { - return cluster.Nodes; - } - } - - /// - /// Enable extended periodic cluster and node latency metrics. - /// - public void EnableMetrics(MetricsPolicy metricsPolicy) - { - cluster.EnableMetrics(metricsPolicy); - } - - /// - /// Disable extended periodic cluster and node latency metrics. - /// - public void DisableMetrics() - { - cluster.DisableMetrics(); - } - - /// - /// Return operating cluster statistics snapshot. - /// - public ClusterStats GetClusterStats() - { - return cluster.GetStats(); - } - - //------------------------------------------------------- - // Multi-Record Transactions - //------------------------------------------------------- - - /// - /// Attempt to commit the given multi-record transaction. First, the expected record versions are - /// sent to the server nodes for verification. If all nodes return success, the transaction is - /// committed. Otherwise, the transaction is aborted. - ///

    - /// Requires server version 8.0+ - ///

    - ///
    - /// multi-record transaction - public CommitStatus.CommitStatusType Commit(Txn txn) - { - if (!txn.SetRollAttempted()) - { - return CommitStatus.CommitStatusType.ALREADY_ATTEMPTED; - } - - TxnRoll tr = new(cluster, txn); - return tr.Commit(txnVerifyPolicyDefault, txnRollPolicyDefault); - } - - /// - /// Abort and rollback the given multi-record transaction. - ///

    - /// Requires server version 8.0+ - ///

    - ///
    - /// multi-record transaction - public AbortStatus.AbortStatusType Abort(Txn txn) - { - if (!txn.SetRollAttempted()) - { - return AbortStatus.AbortStatusType.ALREADY_ATTEMPTED; - } - - TxnRoll tr = new(cluster, txn); - return tr.Abort(txnRollPolicyDefault); - } - - //------------------------------------------------------- - // Write Record Operations - //------------------------------------------------------- - - /// - /// Write record bin(s). - /// The policy specifies the command timeouts, record expiration and how the command is - /// handled when the record already exists. - /// - /// write configuration parameters, pass in null for defaults - /// unique record identifier - /// array of bin name/value pairs - /// if write fails - public void Put(WritePolicy policy, Key key, params Bin[] bins) - { - if (policy == null) - { - policy = writePolicyDefault; - } - - if (policy.Txn != null) - { - TxnMonitor.AddKey(cluster, policy, key); - } - - WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.WRITE); - command.Execute(); - } - - //------------------------------------------------------- - // String Operations - //------------------------------------------------------- - - /// - /// Append bin string values to existing record bin values. - /// The policy specifies the command timeout, record expiration and how the command is - /// handled when the record already exists. - /// This call only works for string values. 
- /// - /// write configuration parameters, pass in null for defaults - /// unique record identifier - /// array of bin name/value pairs - /// if append fails - public void Append(WritePolicy policy, Key key, params Bin[] bins) - { - if (policy == null) - { - policy = writePolicyDefault; - } - - if (policy.Txn != null) - { - TxnMonitor.AddKey(cluster, policy, key); - } - - WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.APPEND); - command.Execute(); - } - - /// - /// Prepend bin string values to existing record bin values. - /// The policy specifies the command timeout, record expiration and how the command is - /// handled when the record already exists. - /// This call works only for string values. - /// - /// write configuration parameters, pass in null for defaults - /// unique record identifier - /// array of bin name/value pairs - /// if prepend fails - public void Prepend(WritePolicy policy, Key key, params Bin[] bins) - { - if (policy == null) - { - policy = writePolicyDefault; - } - - if (policy.Txn != null) - { - TxnMonitor.AddKey(cluster, policy, key); - } - - WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.PREPEND); - command.Execute(); - } - - //------------------------------------------------------- - // Arithmetic Operations - //------------------------------------------------------- - - /// - /// Add integer/double bin values to existing record bin values. - /// The policy specifies the command timeout, record expiration and how the command is - /// handled when the record already exists. 
- /// - /// write configuration parameters, pass in null for defaults - /// unique record identifier - /// array of bin name/value pairs - /// if add fails - public void Add(WritePolicy policy, Key key, params Bin[] bins) - { - if (policy == null) - { - policy = writePolicyDefault; - } - - if (policy.Txn != null) - { - TxnMonitor.AddKey(cluster, policy, key); - } - - WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.ADD); - command.Execute(); - } - - //------------------------------------------------------- - // Delete Operations - //------------------------------------------------------- - - /// - /// Delete record for specified key. - /// Return whether record existed on server before deletion. - /// The policy specifies the command timeout. - /// - /// delete configuration parameters, pass in null for defaults - /// unique record identifier - /// if delete fails - public bool Delete(WritePolicy policy, Key key) - { - if (policy == null) - { - policy = writePolicyDefault; - } - - if (policy.Txn != null) - { - TxnMonitor.AddKey(cluster, policy, key); - } - - DeleteCommand command = new DeleteCommand(cluster, policy, key); - command.Execute(); - return command.Existed(); - } - - /// - /// Delete records for specified keys. If a key is not found, the corresponding result - /// will be . 
- /// - /// Requires server version 6.0+ - /// - /// - /// batch configuration parameters, pass in null for defaults - /// delete configuration parameters, pass in null for defaults - /// array of unique record identifiers - /// which contains results for keys that did complete - public BatchResults Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Key[] keys) - { - if (keys.Length == 0) - { - return new BatchResults(new BatchRecord[0], true); - } - - if (batchPolicy == null) - { - batchPolicy = batchParentPolicyWriteDefault; - } - - if (deletePolicy == null) - { - deletePolicy = batchDeletePolicyDefault; - } - - if (batchPolicy.Txn != null) - { - TxnMonitor.AddKeys(cluster, batchPolicy, keys); - } - - BatchAttr attr = new BatchAttr(); - attr.SetDelete(deletePolicy); - - BatchRecord[] records = new BatchRecord[keys.Length]; - - for (int i = 0; i < keys.Length; i++) - { - records[i] = new BatchRecord(keys[i], attr.hasWrite); - } - - try - { - BatchStatus status = new BatchStatus(true); - List batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status); - BatchCommand[] commands = new BatchCommand[batchNodes.Count]; - int count = 0; - - foreach (BatchNode batchNode in batchNodes) - { - commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, null, records, attr, status); - } - - BatchExecutor.Execute(cluster, batchPolicy, commands, status); - return new BatchResults(records, status.GetStatus()); - } - catch (Exception e) - { - // Batch terminated on fatal error. - throw new AerospikeException.BatchRecordArray(records, e); - } - } - - /// - /// Remove records in specified namespace/set efficiently. This method is many orders of magnitude - /// faster than deleting records one at a time. - /// - /// See https://www.aerospike.com/docs/reference/info#truncate - /// - /// - /// This asynchronous server call may return before the truncation is complete. 
/// <summary>
/// Remove records in specified namespace/set efficiently. This method is many orders of magnitude
/// faster than deleting records one at a time. The user can still write new records after the
/// server returns because new records will have last update times greater than the truncate
/// cutoff (set at the time of truncate call).
/// </summary>
/// <param name="policy">info command configuration parameters, pass in null for defaults</param>
/// <param name="ns">required namespace</param>
/// <param name="set">optional set name. Pass in null to delete all sets in namespace.</param>
/// <param name="beforeLastUpdate">
/// optionally delete records before record last update time.
/// If specified, value must be before the current time.
/// Pass in null to delete all records in namespace/set regardless of last update time.
/// </param>
public void Truncate(InfoPolicy policy, string ns, string set, DateTime? beforeLastUpdate)
{
	policy ??= infoPolicyDefault;

	// Send truncate command to one node. That node will distribute the command to other nodes.
	Node node = cluster.GetRandomNode();
	StringBuilder sb = new StringBuilder(200);

	if (set != null)
	{
		sb.Append("truncate:namespace=");
		sb.Append(ns);
		sb.Append(";set=");
		sb.Append(set);
	}
	else
	{
		sb.Append("truncate-namespace:namespace=");
		sb.Append(ns);
	}

	if (beforeLastUpdate.HasValue)
	{
		sb.Append(";lut=");
		// Convert to nanoseconds since unix epoch.
		sb.Append(Util.NanosFromEpoch(beforeLastUpdate.Value));
	}

	string response = Info.Request(policy, node, sb.ToString());

	if (!response.Equals("ok", StringComparison.CurrentCultureIgnoreCase))
	{
		throw new AerospikeException("Truncate failed: " + response);
	}
}

//-------------------------------------------------------
// Touch Operations
//-------------------------------------------------------

/// <summary>
/// Reset record's time to expiration using the policy's expiration.
/// Fail if the record does not exist.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if touch fails</exception>
public void Touch(WritePolicy policy, Key key)
{
	policy ??= writePolicyDefault;

	// Writes inside a multi-record transaction must first register the key
	// with the transaction monitor record.
	if (policy.Txn != null)
	{
		TxnMonitor.AddKey(cluster, policy, key);
	}

	TouchCommand command = new(cluster, policy, key);
	command.Execute();
}

//-------------------------------------------------------
// Existence-Check Operations
//-------------------------------------------------------

/// <summary>
/// Determine if a record key exists.
/// Return whether record exists or not.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if command fails</exception>
public bool Exists(Policy policy, Key key)
{
	policy ??= readPolicyDefault;

	// Reads inside a transaction only need the namespace registered for verification.
	policy.Txn?.SetNamespace(key.ns);

	ExistsCommand command = new(cluster, policy, key);
	command.Execute();
	return command.Exists();
}

/// <summary>
/// Check if multiple record keys exist in one batch call.
/// The returned boolean array is in positional order with the original key array order.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <exception cref="AerospikeException.BatchExists">which contains results for keys that did complete</exception>
public bool[] Exists(BatchPolicy policy, Key[] keys)
{
	if (keys.Length == 0)
	{
		return new bool[0];
	}

	policy ??= batchPolicyDefault;
	policy.Txn?.SetNamespace(keys);

	bool[] existsArray = new bool[keys.Length];

	try
	{
		BatchStatus status = new(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new(node, keys);
			BatchCommand command = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status);
			BatchExecutor.Execute(command, status);
			return existsArray;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return existsArray;
	}
	catch (Exception e)
	{
		// Wrap so callers can recover partial results.
		throw new AerospikeException.BatchExists(existsArray, e);
	}
}

//-------------------------------------------------------
// Read Record Operations
//-------------------------------------------------------

/// <summary>
/// Read entire record for specified key.
/// If found, return record instance. If not found, return null.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if read fails</exception>
public Record Get(Policy policy, Key key)
{
	policy ??= readPolicyDefault;
	policy.Txn?.SetNamespace(key.ns);

	ReadCommand command = new(cluster, policy, key);
	command.Execute();
	return command.Record;
}

/// <summary>
/// Read record header and bins for specified key.
/// If found, return record instance. If not found, return null.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="binNames">bins to retrieve</param>
/// <exception cref="AerospikeException">if read fails</exception>
public Record Get(Policy policy, Key key, params string[] binNames)
{
	policy ??= readPolicyDefault;
	policy.Txn?.SetNamespace(key.ns);

	ReadCommand command = new(cluster, policy, key, binNames);
	command.Execute();
	return command.Record;
}

/// <summary>
/// Read record generation and expiration only for specified key. Bins are not read.
/// If found, return record instance. If not found, return null.
/// The policy can be used to specify timeouts.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <exception cref="AerospikeException">if read fails</exception>
public Record GetHeader(Policy policy, Key key)
{
	policy ??= readPolicyDefault;
	policy.Txn?.SetNamespace(key.ns);

	ReadHeaderCommand command = new(cluster, policy, key);
	command.Execute();
	return command.Record;
}

//-------------------------------------------------------
// Batch Read Operations
//-------------------------------------------------------

/// <summary>
/// Read multiple records for specified batch keys in one batch call.
/// This method allows different namespaces/bins to be requested for each key in the batch.
/// The returned records are located in the same list.
/// If the BatchRead key field is not found, the corresponding record field will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="records">
/// list of unique record identifiers and the bins to retrieve.
/// The returned records are located in the same list.
/// </param>
/// <returns>true if all batch key requests succeeded</returns>
/// <exception cref="AerospikeException">if read fails</exception>
public bool Get(BatchPolicy policy, List<BatchRead> records)
{
	if (records.Count == 0)
	{
		return true;
	}

	policy ??= batchPolicyDefault;
	policy.Txn?.SetNamespace(records);

	BatchStatus status = new(true);
	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, status);
	BatchCommand[] commands = new BatchCommand[batchNodes.Count];
	int count = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		commands[count++] = new BatchReadListCommand(cluster, batchNode, policy, records, status);
	}
	BatchExecutor.Execute(cluster, policy, commands, status);
	return status.GetStatus();
}

/// <summary>
/// Read multiple records for specified keys in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <exception cref="AerospikeException.BatchRecords">which contains results for keys that did complete</exception>
public Record[] Get(BatchPolicy policy, Key[] keys)
{
	if (keys.Length == 0)
	{
		return new Record[0];
	}

	policy ??= batchPolicyDefault;
	policy.Txn?.SetNamespace(keys);

	Record[] records = new Record[keys.Length];

	try
	{
		BatchStatus status = new(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new(node, keys);
			BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_GET_ALL, false, status);
			BatchExecutor.Execute(command, status);
			return records;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_GET_ALL, false, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return records;
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecords(records, e);
	}
}

/// <summary>
/// Read multiple record headers and bins for specified keys in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="binNames">array of bins to retrieve</param>
/// <exception cref="AerospikeException.BatchRecords">which contains results for keys that did complete</exception>
public Record[] Get(BatchPolicy policy, Key[] keys, params string[] binNames)
{
	if (keys.Length == 0)
	{
		return new Record[0];
	}

	policy ??= batchPolicyDefault;
	policy.Txn?.SetNamespace(keys);

	Record[] records = new Record[keys.Length];

	try
	{
		BatchStatus status = new(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new(node, keys);
			BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, null, records, Command.INFO1_READ, false, status);
			BatchExecutor.Execute(command, status);
			return records;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, null, records, Command.INFO1_READ, false, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return records;
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecords(records, e);
	}
}

/// <summary>
/// Read multiple records for specified keys using read operations in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="ops">array of read operations on record</param>
/// <exception cref="AerospikeException.BatchRecords">which contains results for keys that did complete</exception>
public Record[] Get(BatchPolicy policy, Key[] keys, params Operation[] ops)
{
	if (keys.Length == 0)
	{
		return new Record[0];
	}

	policy ??= batchPolicyDefault;
	policy.Txn?.SetNamespace(keys);

	Record[] records = new Record[keys.Length];

	try
	{
		BatchStatus status = new(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new(node, keys);
			BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, ops, records, Command.INFO1_READ, true, status);
			BatchExecutor.Execute(command, status);
			return records;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, ops, records, Command.INFO1_READ, true, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return records;
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecords(records, e);
	}
}

/// <summary>
/// Read multiple record header data for specified keys in one batch call.
/// The returned records are in positional order with the original key array order.
/// If a key is not found, the positional record will be null.
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <exception cref="AerospikeException.BatchRecords">which contains results for keys that did complete</exception>
public Record[] GetHeader(BatchPolicy policy, Key[] keys)
{
	if (keys.Length == 0)
	{
		return new Record[0];
	}

	policy ??= batchPolicyDefault;
	policy.Txn?.SetNamespace(keys);

	Record[] records = new Record[keys.Length];

	try
	{
		BatchStatus status = new(false);

		if (policy.allowProleReads)
		{
			// Send all requests to a single random node.
			Node node = cluster.GetRandomNode();
			BatchNode batchNode = new(node, keys);
			BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_NOBINDATA, false, status);
			BatchExecutor.Execute(command, status);
			return records;
		}

		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_NOBINDATA, false, status);
		}
		BatchExecutor.Execute(cluster, policy, commands, status);
		return records;
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecords(records, e);
	}
}

//-------------------------------------------------------
// Join methods
//-------------------------------------------------------

/// <summary>
/// Read specified bins in left record and then join with right records. Each join bin name
/// (Join.leftKeysBinName) must exist in the left record. The join bin must contain a list of
/// keys. Those keys are used to retrieve other records using a separate batch get.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique main record identifier</param>
/// <param name="binNames">array of bins to retrieve</param>
/// <param name="joins">array of join definitions</param>
/// <exception cref="AerospikeException">if main read or join reads fail</exception>
public Record Join(BatchPolicy policy, Key key, string[] binNames, params Join[] joins)
{
	// Request both the caller's bins and every join-key bin in a single read.
	string[] names = new string[binNames.Length + joins.Length];
	int count = 0;

	foreach (string binName in binNames)
	{
		names[count++] = binName;
	}

	foreach (Join join in joins)
	{
		names[count++] = join.leftKeysBinName;
	}
	Record record = Get(policy, key, names);
	JoinRecords(policy, record, joins);
	return record;
}

/// <summary>
/// Read all bins in left record and then join with right records. Each join bin name
/// (Join.binNameKeys) must exist in the left record. The join bin must contain a list of
/// keys. Those keys are used to retrieve other records using a separate batch get.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique main record identifier</param>
/// <param name="joins">array of join definitions</param>
/// <exception cref="AerospikeException">if main read or join reads fail</exception>
public Record Join(BatchPolicy policy, Key key, params Join[] joins)
{
	Record record = Get(policy, key);
	JoinRecords(policy, record, joins);
	return record;
}

//-------------------------------------------------------
// Generic Database Operations
//-------------------------------------------------------

/// <summary>
/// Perform multiple read/write operations on a single key in one batch call.
/// An example would be to add an integer value to an existing record and then
/// read the result, all in one database call.
/// <para>
/// The server executes operations in the same order as the operations array.
/// Both scalar bin operations (Operation) and CDT bin operations (ListOperation,
/// MapOperation) can be performed in same call.
/// </para>
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="operations">database operations to perform</param>
/// <exception cref="AerospikeException">if command fails</exception>
public Record Operate(WritePolicy policy, Key key, params Operation[] operations)
{
	OperateArgs args = new(policy, writePolicyDefault, operatePolicyReadDefault, operations);

	if (args.hasWrite)
	{
		policy = args.writePolicy;

		// Transactional writes must register the key with the txn monitor first.
		if (policy.Txn != null)
		{
			TxnMonitor.AddKey(cluster, policy, key);
		}

		OperateCommandWrite command = new(cluster, key, args);
		command.Execute();
		return command.Record;
	}
	else
	{
		// Read-only operate: only the namespace needs to be tracked for verification.
		policy?.Txn?.SetNamespace(key.ns);

		OperateCommandRead command = new(cluster, key, args);
		command.Execute();
		return command.Record;
	}
}

//-------------------------------------------------------
// Batch Read/Write Operations
//-------------------------------------------------------

/// <summary>
/// Read/Write multiple records for specified batch keys in one batch call.
/// This method allows different namespaces/bins for each key in the batch.
/// The returned records are located in the same list.
/// <para>Requires server version 6.0+</para>
/// </summary>
/// <param name="policy">batch configuration parameters, pass in null for defaults</param>
/// <param name="records">list of unique record identifiers and read/write operations</param>
/// <returns>true if all batch sub-commands succeeded</returns>
/// <exception cref="AerospikeException">if command fails</exception>
public bool Operate(BatchPolicy policy, List<BatchRecord> records)
{
	if (records.Count == 0)
	{
		return true;
	}

	policy ??= batchParentPolicyWriteDefault;

	if (policy.Txn != null)
	{
		TxnMonitor.AddKeys(cluster, policy, records);
	}

	BatchStatus status = new(true);
	List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, policy, records, status);
	BatchCommand[] commands = new BatchCommand[batchNodes.Count];
	int count = 0;

	foreach (BatchNode batchNode in batchNodes)
	{
		commands[count++] = new BatchOperateListCommand(cluster, batchNode, policy, records, status);
	}
	BatchExecutor.Execute(cluster, policy, commands, status);
	return status.GetStatus();
}

/// <summary>
/// Perform read/write operations on multiple keys. If a key is not found, the corresponding
/// result will be <see cref="ResultCode.KEY_NOT_FOUND_ERROR"/>.
/// <para>Requires server version 6.0+</para>
/// </summary>
/// <param name="batchPolicy">batch configuration parameters, pass in null for defaults</param>
/// <param name="writePolicy">write configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="ops">
/// read/write operations to perform. Operation.Get() is not allowed because it returns a
/// variable number of bins and makes it difficult (sometimes impossible) to lineup operations
/// with results. Instead, use Operation.Get(binName) for each bin name.
/// </param>
/// <exception cref="AerospikeException.BatchRecordArray">which contains results for keys that did complete</exception>
public BatchResults Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, Key[] keys, params Operation[] ops)
{
	if (keys.Length == 0)
	{
		return new BatchResults(new BatchRecord[0], true);
	}

	batchPolicy ??= batchParentPolicyWriteDefault;
	writePolicy ??= batchWritePolicyDefault;

	if (batchPolicy.Txn != null)
	{
		TxnMonitor.AddKeys(cluster, batchPolicy, keys);
	}

	BatchAttr attr = new(batchPolicy, writePolicy, ops);
	BatchRecord[] records = new BatchRecord[keys.Length];

	for (int i = 0; i < keys.Length; i++)
	{
		records[i] = new BatchRecord(keys[i], attr.hasWrite);
	}

	try
	{
		BatchStatus status = new(true);
		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status);
		}

		BatchExecutor.Execute(cluster, batchPolicy, commands, status);
		return new BatchResults(records, status.GetStatus());
	}
	catch (Exception e)
	{
		throw new AerospikeException.BatchRecordArray(records, e);
	}
}

//-------------------------------------------------------
// Scan Operations
//-------------------------------------------------------

/// <summary>
/// Read all records in specified namespace and set. If the policy's
/// concurrentNodes is specified, each server node will be read in
/// parallel. Otherwise, server nodes are read in series.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanAll(ScanPolicy policy, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	policy ??= scanPolicyDefault;

	Node[] nodes = cluster.ValidateNodes();
	PartitionTracker tracker = new(policy, nodes);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

/// <summary>
/// Read all records in specified namespace and set for one node only.
/// The node is specified by name.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="nodeName">server node name</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanNode(ScanPolicy policy, string nodeName, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	Node node = cluster.GetNode(nodeName);
	ScanNode(policy, node, ns, setName, callback, binNames);
}

/// <summary>
/// Read all records in specified namespace and set for one node only.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="node">server node</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanNode(ScanPolicy policy, Node node, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	policy ??= scanPolicyDefault;

	PartitionTracker tracker = new(policy, node);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

/// <summary>
/// Read records in specified namespace, set and partition filter.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="partitionFilter">filter on a subset of data partitions</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanPartitions(ScanPolicy policy, PartitionFilter partitionFilter, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	policy ??= scanPolicyDefault;

	Node[] nodes = cluster.ValidateNodes();
	PartitionTracker tracker = new(policy, nodes, partitionFilter);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

//---------------------------------------------------------------
// User defined functions
//---------------------------------------------------------------

/// <summary>
/// Register package located in a file containing user defined functions with server.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="clientPath">path of client file containing user defined functions, relative to current directory</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask Register(Policy policy, string clientPath, string serverPath, Language language)
{
	policy ??= writePolicyDefault;

	string content = Util.ReadFileEncodeBase64(clientPath);
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}

/// <summary>
/// Register package located in a resource containing user defined functions with server.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="resourceAssembly">assembly where resource is located. Current assembly can be obtained by: Assembly.GetExecutingAssembly()</param>
/// <param name="resourcePath">namespace path where Lua resource is located. Example: Aerospike.Client.Resources.mypackage.lua</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask Register(Policy policy, Assembly resourceAssembly, string resourcePath, string serverPath, Language language)
{
	policy ??= writePolicyDefault;

	string content;
	using (Stream stream = resourceAssembly.GetManifestResourceStream(resourcePath))
	{
		// NOTE(review): single Read() call assumes the resource stream fills the
		// buffer in one pass; Stream.Read is not guaranteed to do so - confirm.
		byte[] bytes = new byte[stream.Length];
		stream.Read(bytes, 0, bytes.Length);
		content = Convert.ToBase64String(bytes);
	}
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}

/// <summary>
/// Register UDF functions located in a code string with server. Example:
/// <code>
/// String code = @"
/// local function reducer(val1,val2)
///     return val1 + val2
/// end
///
/// function sum_single_bin(stream,name)
///     local function mapper(rec)
///         return rec[name]
///     end
///     return stream : map(mapper) : reduce(reducer)
/// end
/// ";
///
/// client.RegisterUdfString(null, code, "mysum.lua", Language.LUA);
/// </code>
/// <para>
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </para>
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="code">code string containing user defined functions</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask RegisterUdfString(Policy policy, string code, string serverPath, Language language)
{
	policy ??= writePolicyDefault;

	byte[] bytes = ByteUtil.StringToUtf8(code);
	string content = Convert.ToBase64String(bytes);
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}

/// <summary>
/// Remove user defined function from server nodes.
/// </summary>
/// <param name="policy">info configuration parameters, pass in null for defaults</param>
/// <param name="serverPath">location of UDF on server nodes. Example: mylua.lua</param>
/// <exception cref="AerospikeException">if remove fails</exception>
public void RemoveUdf(InfoPolicy policy, string serverPath)
{
	policy ??= infoPolicyDefault;

	// Send UDF command to one node. That node will distribute the UDF command to other nodes.
	string command = "udf-remove:filename=" + serverPath;
	Node node = cluster.GetRandomNode();
	string response = Info.Request(policy, node, command);

	if (response.Equals("ok", StringComparison.CurrentCultureIgnoreCase))
	{
		return;
	}

	if (response.StartsWith("error=file_not_found"))
	{
		// UDF has already been removed.
		return;
	}
	throw new AerospikeException("Remove UDF failed: " + response);
}

/// <summary>
/// Execute user defined function on server and return results.
/// The function operates on a single record.
/// The package name is used to locate the udf file location:
/// <code>udf file = &lt;server udf dir&gt;/&lt;package name&gt;.lua</code>
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="packageName">server package name where user defined function resides</param>
/// <param name="functionName">user defined function</param>
/// <param name="args">arguments passed in to user defined function</param>
/// <exception cref="AerospikeException">if command fails</exception>
public object Execute(WritePolicy policy, Key key, string packageName, string functionName, params Value[] args)
{
	policy ??= writePolicyDefault;

	// UDF execution is a write; register the key with the txn monitor first.
	if (policy.Txn != null)
	{
		TxnMonitor.AddKey(cluster, policy, key);
	}

	ExecuteCommand command = new(cluster, policy, key, packageName, functionName, args);
	command.Execute();

	Record record = command.Record;

	if (record == null || record.bins == null)
	{
		return null;
	}

	// UDF results come back as a single-entry map keyed SUCCESS or FAILURE.
	IDictionary<string, object> map = record.bins;
	object obj;

	if (map.TryGetValue("SUCCESS", out obj))
	{
		return obj;
	}

	if (map.TryGetValue("FAILURE", out obj))
	{
		throw new AerospikeException(obj.ToString());
	}
	throw new AerospikeException("Invalid UDF return value");
}

/// <summary>
/// Execute user defined function on server for each key and return results.
/// The package name is used to locate the udf file location:
/// <code>udf file = &lt;server udf dir&gt;/&lt;package name&gt;.lua</code>
/// <para>Requires server version 6.0+</para>
/// </summary>
/// <param name="batchPolicy">batch configuration parameters, pass in null for defaults</param>
/// <param name="udfPolicy">udf configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="packageName">server package name where user defined function resides</param>
/// <param name="functionName">user defined function</param>
/// <param name="functionArgs">arguments passed in to user defined function</param>
/// <exception cref="AerospikeException.BatchRecordArray">which contains results for keys that did complete</exception>
public BatchResults Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, Key[] keys, string packageName, string functionName, params Value[] functionArgs)
{
	if (keys.Length == 0)
	{
		return new BatchResults(new BatchRecord[0], true);
	}

	batchPolicy ??= batchParentPolicyWriteDefault;
	udfPolicy ??= batchUDFPolicyDefault;

	if (batchPolicy.Txn != null)
	{
		TxnMonitor.AddKeys(cluster, batchPolicy, keys);
	}

	byte[] argBytes = Packer.Pack(functionArgs);

	BatchAttr attr = new();
	attr.SetUDF(udfPolicy);

	BatchRecord[] records = new BatchRecord[keys.Length];

	for (int i = 0; i < keys.Length; i++)
	{
		records[i] = new BatchRecord(keys[i], attr.hasWrite);
	}

	try
	{
		BatchStatus status = new(true);
		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status);
		}

		BatchExecutor.Execute(cluster, batchPolicy, commands, status);
		return new BatchResults(records, status.GetStatus());
	}
	catch (Exception e)
	{
		// Batch terminated on fatal error.
		throw new AerospikeException.BatchRecordArray(records, e);
	}
}

//----------------------------------------------------------
// Query/Execute
//----------------------------------------------------------

/// <summary>
/// Apply user defined function on records that match the background query statement filter.
/// Records are not returned to the client.
/// This asynchronous server call will return before the command is complete.
/// The user can optionally wait for command completion by using the returned
/// ExecuteTask instance.
/// </summary>
/// <param name="policy">configuration parameters, pass in null for defaults</param>
/// <param name="statement">background query definition</param>
/// <param name="packageName">server package where user defined function resides</param>
/// <param name="functionName">function name</param>
/// <param name="functionArgs">to pass to function name, if any</param>
/// <exception cref="AerospikeException">if command fails</exception>
public ExecuteTask Execute(WritePolicy policy, Statement statement, string packageName, string functionName, params Value[] functionArgs)
{
	policy ??= writePolicyDefault;

	statement.PackageName = packageName;
	statement.FunctionName = functionName;
	statement.FunctionArgs = functionArgs;

	cluster.AddCommandCount();

	ulong taskId = statement.PrepareTaskId();
	Node[] nodes = cluster.ValidateNodes();
	Executor executor = new(nodes.Length);

	// Background execute runs on every node in parallel.
	foreach (Node node in nodes)
	{
		ServerCommand command = new(cluster, node, policy, statement, taskId);
		executor.AddCommand(command);
	}

	executor.Execute(nodes.Length);
	return new ExecuteTask(cluster, policy, statement, taskId);
}

/// <summary>
/// Apply operations on records that match the background query statement filter.
/// Records are not returned to the client.
/// This asynchronous server call will return before the command is complete.
/// The user can optionally wait for command completion by using the returned
/// ExecuteTask instance.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="statement">background query definition</param>
/// <param name="operations">list of operations to be performed on selected records</param>
/// <exception cref="AerospikeException">if command fails</exception>
public ExecuteTask Execute(WritePolicy policy, Statement statement, params Operation[] operations)
{
	policy ??= writePolicyDefault;

	if (operations.Length > 0)
	{
		statement.Operations = operations;
	}

	cluster.AddCommandCount();

	ulong taskId = statement.PrepareTaskId();
	Node[] nodes = cluster.ValidateNodes();
	Executor executor = new(nodes.Length);

	foreach (Node node in nodes)
	{
		ServerCommand command = new(cluster, node, policy, statement, taskId);
		executor.AddCommand(command);
	}
	executor.Execute(nodes.Length);
	return new ExecuteTask(cluster, policy, statement, taskId);
}

//--------------------------------------------------------
// Query functions
//--------------------------------------------------------

/// <summary>
/// Execute query and call action for each record returned from server.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="action">action methods to be called for each record</param>
/// <exception cref="AerospikeException">if query fails</exception>
public void Query(QueryPolicy policy, Statement statement, Action<Key, Record> action)
{
	using (RecordSet rs = Query(policy, statement))
	{
		while (rs.Next())
		{
			action(rs.Key, rs.Record);
		}
	}
}

/// <summary>
/// Execute query and return record iterator. The query executor puts records on a queue in
/// separate threads. The calling thread concurrently pops records off the queue through the
/// record iterator.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <exception cref="AerospikeException">if query fails</exception>
public RecordSet Query(QueryPolicy policy, Statement statement)
{
	policy ??= queryPolicyDefault;

	Node[] nodes = cluster.ValidateNodes();

	if (cluster.hasPartitionQuery || statement.filter == null)
	{
		PartitionTracker tracker = new(policy, statement, nodes);
		QueryPartitionExecutor executor = new(cluster, policy, statement, nodes.Length, tracker);
		return executor.RecordSet;
	}
	else
	{
		// Legacy secondary-index query path for servers without partition queries.
		QueryRecordExecutor executor = new(cluster, policy, statement, nodes);
		executor.Execute();
		return executor.RecordSet;
	}
}

/// <summary>
/// Execute query on all server nodes and return records via the listener. This method will
/// block until the query is complete. Listener callbacks are made within the scope of this call.
/// <para>
/// If maxConcurrentNodes is not 1, the supplied listener must handle shared data in a
/// thread-safe manner, because the listener will be called by multiple query threads
/// (one thread per node) in parallel.
/// </para>
/// <para>Requires server version 6.0+ if using a secondary index query.</para>
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="listener">where to send results</param>
/// <exception cref="AerospikeException">if query fails</exception>
public void Query(QueryPolicy policy, Statement statement, QueryListener listener)
{
	policy ??= queryPolicyDefault;

	Node[] nodes = cluster.ValidateNodes();

	if (cluster.hasPartitionQuery || statement.filter == null)
	{
		PartitionTracker tracker = new(policy, statement, nodes);
		QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker);
	}
	else
	{
		throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported");
	}
}

/// <summary>
/// Execute query for specified partitions and return records via the listener. This method will
/// block until the query is complete. Listener callbacks are made within the scope of this call.
/// <para>
/// If maxConcurrentNodes is not 1, the supplied listener must handle shared data in a
/// thread-safe manner, because the listener will be called by multiple query threads
/// (one thread per node) in parallel.
/// </para>
/// <para>
/// The completion status of all partitions is stored in the partitionFilter when the query
/// terminates. This partitionFilter can then be used to resume an incomplete query at a later
/// time. This is the preferred method for query terminate/resume functionality.
/// </para>
/// <para>Requires server version 6.0+ if using a secondary index query.</para>
/// </summary>
- /// - /// where to send results - /// if query fails - public void Query - ( - QueryPolicy policy, - Statement statement, - PartitionFilter partitionFilter, - QueryListener listener - ) - { - if (policy == null) - { - policy = queryPolicyDefault; - } - - Node[] nodes = cluster.ValidateNodes(); - - if (cluster.hasPartitionQuery || statement.filter == null) - { - PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter); - QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker); - } - else - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported"); - } - } - - /// - /// Execute query for specified partitions and return record iterator. The query executor puts - /// records on a queue in separate threads. The calling thread concurrently pops records off - /// the queue through the record iterator. - /// - /// Requires server version 6.0+ if using a secondary index query. - /// - /// - /// query configuration parameters, pass in null for defaults - /// query definition - /// filter on a subset of data partitions - /// if query fails - public RecordSet QueryPartitions - ( - QueryPolicy policy, - Statement statement, - PartitionFilter partitionFilter - ) - { - if (policy == null) - { - policy = queryPolicyDefault; - } - - Node[] nodes = cluster.ValidateNodes(); - - if (cluster.hasPartitionQuery || statement.filter == null) - { - PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter); - QueryPartitionExecutor executor = new QueryPartitionExecutor(cluster, policy, statement, nodes.Length, tracker); - return executor.RecordSet; - } - else - { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, "QueryPartitions() not supported"); - } - } - - /// - /// Execute query, apply statement's aggregation function, and return result iterator. 
- /// The aggregation function should be located in a Lua script file that can be found from the - /// "LuaConfig.PackagePath" paths static variable. The default package path is "udf/?.lua" - /// where "?" is the packageName. - /// - /// The query executor puts results on a queue in separate threads. The calling thread - /// concurrently pops results off the queue through the ResultSet iterator. - /// The aggregation function is called on both server and client (final reduce). - /// Therefore, the Lua script file must also reside on both server and client. - /// - /// - /// query configuration parameters, pass in null for defaults - /// query definition - /// server package where user defined function resides - /// aggregation function name - /// arguments to pass to function name, if any - /// if query fails - public ResultSet QueryAggregate - ( - QueryPolicy policy, - Statement statement, - string packageName, - string functionName, - params Value[] functionArgs - ) - { - statement.SetAggregateFunction(packageName, functionName, functionArgs); - return QueryAggregate(policy, statement); - } - - /// - /// Execute query, apply statement's aggregation function, call action for each aggregation - /// object returned from server. - /// - /// query configuration parameters, pass in null for defaults - /// - /// query definition with aggregate functions already initialized by SetAggregateFunction(). - /// - /// action methods to be called for each aggregation object - /// if query fails - public void QueryAggregate(QueryPolicy policy, Statement statement, Action action) - { - using (ResultSet rs = QueryAggregate(policy, statement)) - { - while (rs.Next()) - { - action(rs.Object); - } - } - } - - /// - /// Execute query, apply statement's aggregation function, and return result iterator. - /// The aggregation function should be initialized via the statement's SetAggregateFunction() - /// and should be located in a Lua resource file located in an assembly. 
- /// - /// The query executor puts results on a queue in separate threads. The calling thread - /// concurrently pops results off the queue through the ResultSet iterator. - /// The aggregation function is called on both server and client (final reduce). - /// Therefore, the Lua script file must also reside on both server and client. - /// - /// - /// query configuration parameters, pass in null for defaults - /// - /// query definition with aggregate functions already initialized by SetAggregateFunction(). - /// - /// if query fails - public ResultSet QueryAggregate(QueryPolicy policy, Statement statement) - { - if (policy == null) - { - policy = queryPolicyDefault; - } - - Node[] nodes = cluster.ValidateNodes(); - QueryAggregateExecutor executor = new QueryAggregateExecutor(cluster, policy, statement, nodes); - executor.Execute(); - return executor.ResultSet; - } - - //-------------------------------------------------------- - // Secondary Index functions - //-------------------------------------------------------- - - /// - /// Create scalar secondary index. - /// This asynchronous server call will return before command is complete. - /// The user can optionally wait for command completion by using the returned - /// IndexTask instance. - /// - /// generic configuration parameters, pass in null for defaults - /// namespace - equivalent to database name - /// optional set name - equivalent to database table - /// name of secondary index - /// bin name that data is indexed on - /// underlying data type of secondary index - /// if index create fails - public IndexTask CreateIndex - ( - Policy policy, - string ns, - string setName, - string indexName, - string binName, - IndexType indexType - ) - { - return CreateIndex(policy, ns, setName, indexName, binName, indexType, IndexCollectionType.DEFAULT); - } - - /// - /// Create complex secondary index on bins containing collections. - /// This asynchronous server call will return before command is complete. 
- /// The user can optionally wait for command completion by using the returned - /// IndexTask instance. - /// - /// generic configuration parameters, pass in null for defaults - /// namespace - equivalent to database name - /// optional set name - equivalent to database table - /// name of secondary index - /// bin name that data is indexed on - /// underlying data type of secondary index - /// index collection type - /// optional context to index on elements within a CDT - /// if index create fails - public IndexTask CreateIndex - ( - Policy policy, - string ns, - string setName, - string indexName, - string binName, - IndexType indexType, - IndexCollectionType indexCollectionType, - params CTX[] ctx - ) - { - if (policy == null) - { - policy = writePolicyDefault; - } - - StringBuilder sb = new StringBuilder(1024); - sb.Append("sindex-create:ns="); - sb.Append(ns); - - if (setName != null && setName.Length > 0) - { - sb.Append(";set="); - sb.Append(setName); - } - - sb.Append(";indexname="); - sb.Append(indexName); - - if (ctx != null && ctx.Length > 0) - { - byte[] bytes = PackUtil.Pack(ctx); - string base64 = Convert.ToBase64String(bytes); - - sb.Append(";context="); - sb.Append(base64); - } - - if (indexCollectionType != IndexCollectionType.DEFAULT) - { - sb.Append(";indextype="); - sb.Append(indexCollectionType); - } - - sb.Append(";indexdata="); - sb.Append(binName); - sb.Append(','); - sb.Append(indexType); - - // Send index command to one node. That node will distribute the command to other nodes. - String response = SendInfoCommand(policy, sb.ToString()); - - if (response.Equals("OK", StringComparison.CurrentCultureIgnoreCase)) - { - // Return task that could optionally be polled for completion. - return new IndexTask(cluster, policy, ns, indexName, true); - } - - ParseInfoError("Create index failed", response); - return null; - } - - /// - /// Delete secondary index. - /// This asynchronous server call will return before command is complete. 
- /// The user can optionally wait for command completion by using the returned - /// IndexTask instance. - /// - /// generic configuration parameters, pass in null for defaults - /// namespace - equivalent to database name - /// optional set name - equivalent to database table - /// name of secondary index - /// if index drop fails - public IndexTask DropIndex(Policy policy, string ns, string setName, string indexName) - { - if (policy == null) - { - policy = writePolicyDefault; - } - StringBuilder sb = new StringBuilder(500); - sb.Append("sindex-delete:ns="); - sb.Append(ns); - - if (setName != null && setName.Length > 0) - { - sb.Append(";set="); - sb.Append(setName); - } - sb.Append(";indexname="); - sb.Append(indexName); - - // Send index command to one node. That node will distribute the command to other nodes. - String response = SendInfoCommand(policy, sb.ToString()); - - if (response.Equals("OK", StringComparison.CurrentCultureIgnoreCase)) - { - return new IndexTask(cluster, policy, ns, indexName, false); - } - - ParseInfoError("Drop index failed", response); - return null; - } - - //----------------------------------------------------------------- - // XDR - Cross datacenter replication - //----------------------------------------------------------------- - - /// - /// Set XDR filter for given datacenter name and namespace. The expression filter indicates - /// which records XDR should ship to the datacenter. - /// - /// info configuration parameters, pass in null for defaults - /// XDR datacenter name - /// namespace - equivalent to database name - /// expression filter - /// if command fails - public void SetXDRFilter(InfoPolicy policy, string datacenter, string ns, Expression filter) - { - if (policy == null) - { - policy = infoPolicyDefault; - } - - // Send XDR command to one node. That node will distribute the XDR command to other nodes. 
- string command = "xdr-set-filter:dc=" + datacenter + ";namespace=" + ns + ";exp=" + filter.GetBase64(); - Node node = cluster.GetRandomNode(); - string response = Info.Request(policy, node, command); - - if (response.Equals("ok", StringComparison.CurrentCultureIgnoreCase)) - { - return; - } - - ParseInfoError("xdr-set-filter failed", response); - } - - //------------------------------------------------------- - // User administration - //------------------------------------------------------- - - /// - /// Create user with password and roles. Clear-text password will be hashed using bcrypt - /// before sending to server. - /// - /// admin configuration parameters, pass in null for defaults - /// user name - /// user password in clear-text format - /// variable arguments array of role names. Predefined roles are listed in Role.cs - public void CreateUser(AdminPolicy policy, string user, string password, IList roles) - { - string hash = AdminCommand.HashPassword(password); - AdminCommand command = new AdminCommand(); - command.CreateUser(cluster, policy, user, hash, roles); - } - - /// - /// Remove user from cluster. - /// - /// admin configuration parameters, pass in null for defaults - /// user name - public void DropUser(AdminPolicy policy, string user) - { - AdminCommand command = new AdminCommand(); - command.DropUser(cluster, policy, user); - } - - /// - /// Change user's password. 
- /// - /// admin configuration parameters, pass in null for defaults - /// user name - /// user password in clear-text format - public void ChangePassword(AdminPolicy policy, string user, string password) - { - if (cluster.user == null) - { - throw new AerospikeException("Invalid user"); - } - - byte[] userBytes = ByteUtil.StringToUtf8(user); - byte[] passwordBytes = ByteUtil.StringToUtf8(password); - - string hash = AdminCommand.HashPassword(password); - byte[] hashBytes = ByteUtil.StringToUtf8(hash); - - AdminCommand command = new AdminCommand(); - - if (Util.ByteArrayEquals(userBytes, cluster.user)) - { - // Change own password. - command.ChangePassword(cluster, policy, userBytes, hash); - } - else - { - // Change other user's password by user admin. - command.SetPassword(cluster, policy, userBytes, hash); - } - cluster.ChangePassword(userBytes, passwordBytes, hashBytes); - } - - /// - /// Add roles to user's list of roles. - /// - /// admin configuration parameters, pass in null for defaults - /// user name - /// role names. Predefined roles are listed in Role.cs - public void GrantRoles(AdminPolicy policy, string user, IList roles) - { - AdminCommand command = new AdminCommand(); - command.GrantRoles(cluster, policy, user, roles); - } - - /// - /// Remove roles from user's list of roles. - /// - /// admin configuration parameters, pass in null for defaults - /// user name - /// role names. Predefined roles are listed in Role.cs - public void RevokeRoles(AdminPolicy policy, string user, IList roles) - { - AdminCommand command = new AdminCommand(); - command.RevokeRoles(cluster, policy, user, roles); - } - - /// - /// Create user defined role. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// privileges assigned to the role. 
- /// if command fails - public void CreateRole(AdminPolicy policy, string roleName, IList privileges) - { - AdminCommand command = new AdminCommand(); - command.CreateRole(cluster, policy, roleName, privileges); - } - - /// - /// Create user defined role with optional privileges and whitelist. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// optional list of privileges assigned to role. - /// - /// optional list of allowable IP addresses assigned to role. - /// IP addresses can contain wildcards (ie. 10.1.2.0/24). - /// - /// if command fails - public void CreateRole(AdminPolicy policy, string roleName, IList privileges, IList whitelist) - { - AdminCommand command = new AdminCommand(); - command.CreateRole(cluster, policy, roleName, privileges, whitelist, 0, 0); - } - - /// - /// Create user defined role with optional privileges, whitelist and read/write quotas. - /// Quotas require server security configuration "enable-quotas" to be set to true. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// optional list of privileges assigned to role. - /// - /// optional list of allowable IP addresses assigned to role. - /// IP addresses can contain wildcards (ie. 10.1.2.0/24). - /// - /// optional maximum reads per second limit, pass in zero for no limit. - /// optional maximum writes per second limit, pass in zero for no limit. - /// if command fails - public void CreateRole - ( - AdminPolicy policy, - string roleName, - IList privileges, - IList whitelist, - int readQuota, - int writeQuota - ) - { - AdminCommand command = new AdminCommand(); - command.CreateRole(cluster, policy, roleName, privileges, whitelist, readQuota, writeQuota); - } - - /// - /// Drop user defined role. 
- /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// if command fails - public void DropRole(AdminPolicy policy, string roleName) - { - AdminCommand command = new AdminCommand(); - command.DropRole(cluster, policy, roleName); - } - - /// - /// Grant privileges to an user defined role. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// privileges assigned to the role. - /// if command fails - public void GrantPrivileges(AdminPolicy policy, string roleName, IList privileges) - { - AdminCommand command = new AdminCommand(); - command.GrantPrivileges(cluster, policy, roleName, privileges); - } - - /// - /// Revoke privileges from an user defined role. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// privileges assigned to the role. - /// if command fails - public void RevokePrivileges(AdminPolicy policy, string roleName, IList privileges) - { - AdminCommand command = new AdminCommand(); - command.RevokePrivileges(cluster, policy, roleName, privileges); - } - - /// - /// Set IP address whitelist for a role. If whitelist is null or empty, remove existing whitelist from role. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// - /// list of allowable IP addresses or null. - /// IP addresses can contain wildcards (ie. 10.1.2.0/24). - /// - /// if command fails - public void SetWhitelist(AdminPolicy policy, string roleName, IList whitelist) - { - AdminCommand command = new AdminCommand(); - command.SetWhitelist(cluster, policy, roleName, whitelist); - } - - /// - /// Set maximum reads/writes per second limits for a role. If a quota is zero, the limit is removed. - /// Quotas require server security configuration "enable-quotas" to be set to true. - /// - /// admin configuration parameters, pass in null for defaults - /// role name - /// maximum reads per second limit, pass in zero for no limit. 
- /// maximum writes per second limit, pass in zero for no limit. - /// if command fails - public void SetQuotas(AdminPolicy policy, string roleName, int readQuota, int writeQuota) - { - AdminCommand command = new AdminCommand(); - command.setQuotas(cluster, policy, roleName, readQuota, writeQuota); - } - - /// - /// Retrieve roles for a given user. - /// - /// admin configuration parameters, pass in null for defaults - /// user name filter - public User QueryUser(AdminPolicy policy, string user) - { - AdminCommand.UserCommand command = new AdminCommand.UserCommand(1); - return command.QueryUser(cluster, policy, user); - } - - /// - /// Retrieve all users and their roles. - /// - /// admin configuration parameters, pass in null for defaults - public List QueryUsers(AdminPolicy policy) - { - AdminCommand.UserCommand command = new AdminCommand.UserCommand(100); - return command.QueryUsers(cluster, policy); - } - - /// - /// Retrieve role definition. - /// - /// admin configuration parameters, pass in null for defaults - /// role name filter - /// if command fails - public Role QueryRole(AdminPolicy policy, string roleName) - { - AdminCommand.RoleCommand command = new AdminCommand.RoleCommand(1); - return command.QueryRole(cluster, policy, roleName); - } - - /// - /// Retrieve all roles. 
- /// - /// admin configuration parameters, pass in null for defaults - /// if command fails - public List QueryRoles(AdminPolicy policy) - { - AdminCommand.RoleCommand command = new AdminCommand.RoleCommand(100); - return command.QueryRoles(cluster, policy); - } - - //------------------------------------------------------- - // Internal Methods - //------------------------------------------------------- - - private string SendInfoCommand(Policy policy, string command) - { - Node node = cluster.GetRandomNode(); - Connection conn = node.GetConnection(policy.socketTimeout); - Info info; - - try - { - info = new Info(conn, command); - node.PutConnection(conn); - } - catch (Exception) - { - node.CloseConnectionOnError(conn); - throw; - } - return info.GetValue(); - } - - private void ParseInfoError(string prefix, string response) - { - string message = prefix + ": " + response; - string[] list = response.Split(':'); - - if (list.Length >= 2 && list[0].Equals("FAIL")) - { - int code = 0; - - try - { - code = Convert.ToInt32(list[1]); - } - catch (Exception) - { - } - throw new AerospikeException(code, message); - } - throw new AerospikeException(message); - } - - private void JoinRecords(BatchPolicy policy, Record record, Join[] joins) - { - if (record == null) - { - return; - } - - foreach (Join join in joins) - { - List keyList = (List)record.GetValue(join.leftKeysBinName); - - if (keyList != null) - { - Key[] keyArray = new Key[keyList.Count]; - int count = 0; - - foreach (object obj in keyList) - { - Value value = Value.Get(obj); - keyArray[count++] = new Key(join.rightNamespace, join.rightSetName, value); - } - - Record[] records; - if (join.rightBinNames == null || join.rightBinNames.Length == 0) - { - records = Get(policy, keyArray); - } - else - { - records = Get(policy, keyArray, join.rightBinNames); - } - record.bins[join.leftKeysBinName] = records; - } - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. 
+ * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using System.Reflection; +using System.Text; + +namespace Aerospike.Client +{ + /// + /// Instantiate an AerospikeClient object to access an Aerospike + /// database cluster and perform database operations. + /// + /// This client is thread-safe. One client instance should be used per cluster. + /// Multiple threads should share this cluster instance. + /// + /// + /// Your application uses this class API to perform database operations such as + /// writing and reading records, and selecting sets of records. Write operations + /// include specialized functionality such as append/prepend and arithmetic + /// addition. + /// + /// + /// Each record may have multiple bins, unless the Aerospike server nodes are + /// configured as "single-bin". In "multi-bin" mode, partial records may be + /// written or read by specifying the relevant subset of bins. + /// + /// + public class AerospikeClient : IDisposable, IAerospikeClient + { + //------------------------------------------------------- + // Member variables. + //------------------------------------------------------- + + protected internal Cluster cluster; + + /// + /// Default read policy that is used when read command policy is null. + /// + protected Policy readPolicyDefault; + + /// + /// Default write policy that is used when write command policy is null. 
+ /// + protected WritePolicy writePolicyDefault; + + /// + /// Default scan policy that is used when scan command policy is null. + /// + protected ScanPolicy scanPolicyDefault; + + /// + /// Default query policy that is used when query command policy is null. + /// + protected QueryPolicy queryPolicyDefault; + + /// + /// Default parent policy used in batch read commands. Parent policy fields + /// include socketTimeout, totalTimeout, maxRetries, etc... + /// + protected BatchPolicy batchPolicyDefault; + + /// + /// Default parent policy used in batch write commands. Parent policy fields + /// include socketTimeout, totalTimeout, maxRetries, etc... + /// + protected BatchPolicy batchParentPolicyWriteDefault; + + /// + /// Default write policy used in batch operate commands. + /// Write policy fields include generation, expiration, durableDelete, etc... + /// + protected BatchWritePolicy batchWritePolicyDefault; + + /// + /// Default delete policy used in batch delete commands. + /// + protected BatchDeletePolicy batchDeletePolicyDefault; + + /// + /// Default user defined function policy used in batch UDF excecute commands. + /// + protected BatchUDFPolicy batchUDFPolicyDefault; + + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// + protected TxnVerifyPolicy txnVerifyPolicyDefault; + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// + protected TxnRollPolicy txnRollPolicyDefault; + + /// + /// Default info policy that is used when info command policy is null. + /// + protected InfoPolicy infoPolicyDefault; + + protected WritePolicy operatePolicyReadDefault; + + //------------------------------------------------------- + // Constructors + //------------------------------------------------------- + + /// + /// Initialize Aerospike client. 
+ /// If the host connection succeeds, the client will: + /// + /// Add host to the cluster map + /// Request host's list of other nodes in cluster + /// Add these nodes to cluster map + /// + /// + /// If the connection succeeds, the client is ready to process database requests. + /// If the connection fails, the cluster will remain in a disconnected state + /// until the server is activated. + /// + /// + /// host name + /// host port + /// if host connection fails + public AerospikeClient(string hostname, int port) + : this(new ClientPolicy(), new Host(hostname, port)) + { + } + + /// + /// Initialize Aerospike client. + /// The client policy is used to set defaults and size internal data structures. + /// If the host connection succeeds, the client will: + /// + /// Add host to the cluster map + /// Request host's list of other nodes in cluster + /// Add these nodes to cluster map + /// + /// + /// If the connection succeeds, the client is ready to process database requests. + /// If the connection fails and the policy's failOnInvalidHosts is true, a connection + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state + /// until the server is activated. + /// + /// + /// client configuration parameters, pass in null for defaults + /// host name + /// host port + /// if host connection fails + public AerospikeClient(ClientPolicy policy, string hostname, int port) + : this(policy, new Host(hostname, port)) + { + } + + /// + /// Initialize Aerospike client with suitable hosts to seed the cluster map. + /// The client policy is used to set defaults and size internal data structures. + /// For the first host connection that succeeds, the client will: + /// + /// Add host to the cluster map + /// Request host's list of other nodes in cluster + /// Add these nodes to cluster map + /// + /// + /// In most cases, only one host is necessary to seed the cluster. 
The remaining hosts + /// are added as future seeds in case of a complete network failure. + /// + /// + /// If one connection succeeds, the client is ready to process database requests. + /// If all connections fail and the policy's failIfNotConnected is true, a connection + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state + /// until the server is activated. + /// + /// + /// client configuration parameters, pass in null for defaults + /// array of potential hosts to seed the cluster + /// if all host connections fail + public AerospikeClient(ClientPolicy policy, params Host[] hosts) + { + if (policy == null) + { + policy = new ClientPolicy(); + } + this.readPolicyDefault = policy.readPolicyDefault; + this.writePolicyDefault = policy.writePolicyDefault; + this.scanPolicyDefault = policy.scanPolicyDefault; + this.queryPolicyDefault = policy.queryPolicyDefault; + this.batchPolicyDefault = policy.batchPolicyDefault; + this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault; + this.batchWritePolicyDefault = policy.batchWritePolicyDefault; + this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; + this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; + this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; + this.txnRollPolicyDefault = policy.txnRollPolicyDefault; + this.infoPolicyDefault = policy.infoPolicyDefault; + this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); + + cluster = new Cluster(policy, hosts); + cluster.InitTendThread(policy.failIfNotConnected); + } + + /// + /// Construct client without initialization. + /// Should only be used by classes inheriting from this client. 
+ /// + protected internal AerospikeClient(ClientPolicy policy) + { + if (policy != null) + { + this.readPolicyDefault = policy.readPolicyDefault; + this.writePolicyDefault = policy.writePolicyDefault; + this.scanPolicyDefault = policy.scanPolicyDefault; + this.queryPolicyDefault = policy.queryPolicyDefault; + this.batchPolicyDefault = policy.batchPolicyDefault; + this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault; + this.batchWritePolicyDefault = policy.batchWritePolicyDefault; + this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; + this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; + this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; + this.txnRollPolicyDefault = policy.txnRollPolicyDefault; + this.infoPolicyDefault = policy.infoPolicyDefault; + this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); + } + else + { + this.readPolicyDefault = new Policy(); + this.writePolicyDefault = new WritePolicy(); + this.scanPolicyDefault = new ScanPolicy(); + this.queryPolicyDefault = new QueryPolicy(); + this.batchPolicyDefault = new BatchPolicy(); + this.batchParentPolicyWriteDefault = BatchPolicy.WriteDefault(); + this.batchWritePolicyDefault = new BatchWritePolicy(); + this.batchDeletePolicyDefault = new BatchDeletePolicy(); + this.batchUDFPolicyDefault = new BatchUDFPolicy(); + this.txnVerifyPolicyDefault = new TxnVerifyPolicy(); + this.txnRollPolicyDefault= new TxnRollPolicy(); + this.infoPolicyDefault = new InfoPolicy(); + } + this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); + } + + //------------------------------------------------------- + // Operations policies + //------------------------------------------------------- + + /// + /// Default read policy that is used when read command policy is null. 
+ /// + public Policy ReadPolicyDefault + { + get { return new Policy(readPolicyDefault); } + set { readPolicyDefault = value; } + } + + /// + /// Default write policy that is used when write command policy is null. + /// + public WritePolicy WritePolicyDefault + { + get { return new WritePolicy(writePolicyDefault); } + set { writePolicyDefault = value; } + } + + /// + /// Default scan policy that is used when scan command policy is null. + /// + public ScanPolicy ScanPolicyDefault + { + get { return new ScanPolicy(scanPolicyDefault); } + set { scanPolicyDefault = value; } + } + + /// + /// Default query policy that is used when query command policy is null. + /// + public QueryPolicy QueryPolicyDefault + { + get { return new QueryPolicy(queryPolicyDefault); } + set { queryPolicyDefault = value; } + } + + /// + /// Default parent policy used in batch read commands.Parent policy fields + /// include socketTimeout, totalTimeout, maxRetries, etc... + /// + public BatchPolicy BatchPolicyDefault + { + get { return new BatchPolicy(batchPolicyDefault); } + set { batchPolicyDefault = value; } + } + + /// + /// Default parent policy used in batch write commands. Parent policy fields + /// include socketTimeout, totalTimeout, maxRetries, etc... + /// + public BatchPolicy BatchParentPolicyWriteDefault + { + get { return new BatchPolicy(batchParentPolicyWriteDefault); } + set { batchParentPolicyWriteDefault = value; } + } + + /// + /// Default write policy used in batch operate commands. + /// Write policy fields include generation, expiration, durableDelete, etc... + /// + public BatchWritePolicy BatchWritePolicyDefault + { + get { return new BatchWritePolicy(batchWritePolicyDefault); } + set { batchWritePolicyDefault = value; } + } + + /// + /// Default delete policy used in batch delete commands. 
+ /// + public BatchDeletePolicy BatchDeletePolicyDefault + { + get { return new BatchDeletePolicy(batchDeletePolicyDefault); } + set { batchDeletePolicyDefault = value; } + } + + /// + /// Default user defined function policy used in batch UDF excecute commands. + /// + public BatchUDFPolicy BatchUDFPolicyDefault + { + get { return new BatchUDFPolicy(batchUDFPolicyDefault); } + set { batchUDFPolicyDefault = value; } + } + + /// + /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// + public TxnVerifyPolicy TxnVerifyPolicyDefault + { + get { return new TxnVerifyPolicy(txnVerifyPolicyDefault); } + set { txnVerifyPolicyDefault = value; } + } + + /// + /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) + /// or back(abort) in a batch. + /// + public TxnRollPolicy TxnRollPolicyDefault + { + get { return new TxnRollPolicy(txnRollPolicyDefault); } + set { txnRollPolicyDefault = value; } + } + + /// + /// Default info policy that is used when info command policy is null. + /// + public InfoPolicy InfoPolicyDefault + { + get { return infoPolicyDefault; } + set { infoPolicyDefault = value; } + } + + //------------------------------------------------------- + // Cluster Connection Management + //------------------------------------------------------- + + public bool Disposed { get; private set; } + private void Dispose(bool disposing) + { + if (!Disposed) + { + if (disposing) + { + this.Close(); + } + + Disposed = true; + } + } + + /// + /// Close all client connections to database server nodes. + /// + public void Dispose() + { + // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method + Dispose(disposing: true); + GC.SuppressFinalize(this); + } + + /// + /// Close all client connections to database server nodes. 
+ /// + public void Close() + { + cluster.Close(); + } + + /// + /// Return if we are ready to talk to the database server cluster. + /// + public bool Connected + { + get + { + return cluster.Connected; + } + } + + /// + /// Cluster associated with this AerospikeClient instance. + /// + public Cluster Cluster + { + get + { + return cluster; + } + } + + /// + /// Return array of active server nodes in the cluster. + /// + public Node[] Nodes + { + get + { + return cluster.Nodes; + } + } + + /// + /// Enable extended periodic cluster and node latency metrics. + /// + public void EnableMetrics(MetricsPolicy metricsPolicy) + { + cluster.EnableMetrics(metricsPolicy); + } + + /// + /// Disable extended periodic cluster and node latency metrics. + /// + public void DisableMetrics() + { + cluster.DisableMetrics(); + } + + /// + /// Return operating cluster statistics snapshot. + /// + public ClusterStats GetClusterStats() + { + return cluster.GetStats(); + } + + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /// + /// Attempt to commit the given multi-record transaction. First, the expected record versions are + /// sent to the server nodes for verification. If all nodes return success, the transaction is + /// committed. Otherwise, the transaction is aborted. + ///

    + /// Requires server version 8.0+ + ///

    + ///
    + /// multi-record transaction + public CommitStatus.CommitStatusType Commit(Txn txn) + { + if (!txn.SetRollAttempted()) + { + return CommitStatus.CommitStatusType.ALREADY_ATTEMPTED; + } + + TxnRoll tr = new(cluster, txn); + return tr.Commit(txnVerifyPolicyDefault, txnRollPolicyDefault); + } + + /// + /// Abort and rollback the given multi-record transaction. + ///

    + /// Requires server version 8.0+ + ///

    + ///
    + /// multi-record transaction + public AbortStatus.AbortStatusType Abort(Txn txn) + { + if (!txn.SetRollAttempted()) + { + return AbortStatus.AbortStatusType.ALREADY_ATTEMPTED; + } + + TxnRoll tr = new(cluster, txn); + return tr.Abort(txnRollPolicyDefault); + } + + //------------------------------------------------------- + // Write Record Operations + //------------------------------------------------------- + + /// + /// Write record bin(s). + /// The policy specifies the command timeouts, record expiration and how the command is + /// handled when the record already exists. + /// + /// write configuration parameters, pass in null for defaults + /// unique record identifier + /// array of bin name/value pairs + /// if write fails + public void Put(WritePolicy policy, Key key, params Bin[] bins) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.WRITE); + command.Execute(); + } + + //------------------------------------------------------- + // String Operations + //------------------------------------------------------- + + /// + /// Append bin string values to existing record bin values. + /// The policy specifies the command timeout, record expiration and how the command is + /// handled when the record already exists. + /// This call only works for string values. 
+ /// + /// write configuration parameters, pass in null for defaults + /// unique record identifier + /// array of bin name/value pairs + /// if append fails + public void Append(WritePolicy policy, Key key, params Bin[] bins) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.APPEND); + command.Execute(); + } + + /// + /// Prepend bin string values to existing record bin values. + /// The policy specifies the command timeout, record expiration and how the command is + /// handled when the record already exists. + /// This call works only for string values. + /// + /// write configuration parameters, pass in null for defaults + /// unique record identifier + /// array of bin name/value pairs + /// if prepend fails + public void Prepend(WritePolicy policy, Key key, params Bin[] bins) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.PREPEND); + command.Execute(); + } + + //------------------------------------------------------- + // Arithmetic Operations + //------------------------------------------------------- + + /// + /// Add integer/double bin values to existing record bin values. + /// The policy specifies the command timeout, record expiration and how the command is + /// handled when the record already exists. 
+ /// + /// write configuration parameters, pass in null for defaults + /// unique record identifier + /// array of bin name/value pairs + /// if add fails + public void Add(WritePolicy policy, Key key, params Bin[] bins) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.ADD); + command.Execute(); + } + + //------------------------------------------------------- + // Delete Operations + //------------------------------------------------------- + + /// + /// Delete record for specified key. + /// Return whether record existed on server before deletion. + /// The policy specifies the command timeout. + /// + /// delete configuration parameters, pass in null for defaults + /// unique record identifier + /// if delete fails + public bool Delete(WritePolicy policy, Key key) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + DeleteCommand command = new DeleteCommand(cluster, policy, key); + command.Execute(); + return command.Existed(); + } + + /// + /// Delete records for specified keys. If a key is not found, the corresponding result + /// will be . 
+ /// + /// Requires server version 6.0+ + /// + /// + /// batch configuration parameters, pass in null for defaults + /// delete configuration parameters, pass in null for defaults + /// array of unique record identifiers + /// which contains results for keys that did complete + public BatchResults Delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Key[] keys) + { + if (keys.Length == 0) + { + return new BatchResults(new BatchRecord[0], true); + } + + if (batchPolicy == null) + { + batchPolicy = batchParentPolicyWriteDefault; + } + + if (deletePolicy == null) + { + deletePolicy = batchDeletePolicyDefault; + } + + if (batchPolicy.Txn != null) + { + TxnMonitor.AddKeys(cluster, batchPolicy, keys); + } + + BatchAttr attr = new BatchAttr(); + attr.SetDelete(deletePolicy); + + BatchRecord[] records = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + records[i] = new BatchRecord(keys[i], attr.hasWrite); + } + + try + { + BatchStatus status = new BatchStatus(true); + List batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, null, records, attr, status); + } + + BatchExecutor.Execute(cluster, batchPolicy, commands, status); + return new BatchResults(records, status.GetStatus()); + } + catch (Exception e) + { + // Batch terminated on fatal error. + throw new AerospikeException.BatchRecordArray(records, e); + } + } + + /// + /// Remove records in specified namespace/set efficiently. This method is many orders of magnitude + /// faster than deleting records one at a time. + /// + /// See https://www.aerospike.com/docs/reference/info#truncate + /// + /// + /// This asynchronous server call may return before the truncation is complete. 
The user can still + /// write new records after the server returns because new records will have last update times + /// greater than the truncate cutoff (set at the time of truncate call). + /// + /// + /// info command configuration parameters, pass in null for defaults + /// required namespace + /// optional set name. Pass in null to delete all sets in namespace. + /// + /// optionally delete records before record last update time. + /// If specified, value must be before the current time. + /// Pass in null to delete all records in namespace/set regardless of last update time. + /// + public void Truncate(InfoPolicy policy, string ns, string set, DateTime? beforeLastUpdate) + { + if (policy == null) + { + policy = infoPolicyDefault; + } + + // Send truncate command to one node. That node will distribute the command to other nodes. + Node node = cluster.GetRandomNode(); + + StringBuilder sb = new StringBuilder(200); + + if (set != null) + { + sb.Append("truncate:namespace="); + sb.Append(ns); + sb.Append(";set="); + sb.Append(set); + } + else + { + sb.Append("truncate-namespace:namespace="); + sb.Append(ns); + } + + if (beforeLastUpdate.HasValue) + { + sb.Append(";lut="); + // Convert to nanoseconds since unix epoch. + sb.Append(Util.NanosFromEpoch(beforeLastUpdate.Value)); + } + + string response = Info.Request(policy, node, sb.ToString()); + + if (!response.Equals("ok", StringComparison.CurrentCultureIgnoreCase)) + { + throw new AerospikeException("Truncate failed: " + response); + } + } + + //------------------------------------------------------- + // Touch Operations + //------------------------------------------------------- + + /// + /// Reset record's time to expiration using the policy's expiration. + /// Fail if the record does not exist. 
+ /// + /// write configuration parameters, pass in null for defaults + /// unique record identifier + /// if touch fails + public void Touch(WritePolicy policy, Key key) + { + if (policy == null) + { + policy = writePolicyDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + TouchCommand command = new TouchCommand(cluster, policy, key); + command.Execute(); + } + + //------------------------------------------------------- + // Existence-Check Operations + //------------------------------------------------------- + + /// + /// Determine if a record key exists. + /// Return whether record exists or not. + /// The policy can be used to specify timeouts. + /// + /// generic configuration parameters, pass in null for defaults + /// unique record identifier + /// if command fails + public bool Exists(Policy policy, Key key) + { + if (policy == null) + { + policy = readPolicyDefault; + } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(key.ns); + } + + ExistsCommand command = new ExistsCommand(cluster, policy, key); + command.Execute(); + return command.Exists(); + } + + /// + /// Check if multiple record keys exist in one batch call. + /// The returned boolean array is in positional order with the original key array order. + /// + /// batch configuration parameters, pass in null for defaults + /// array of unique record identifiers + /// which contains results for keys that did complete + public bool[] Exists(BatchPolicy policy, Key[] keys) + { + if (keys.Length == 0) + { + return new bool[0]; + } + + if (policy == null) + { + policy = batchPolicyDefault; + } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } + + bool[] existsArray = new bool[keys.Length]; + + try + { + BatchStatus status = new BatchStatus(false); + + if (policy.allowProleReads) + { + // Send all requests to a single random node. 
+ Node node = cluster.GetRandomNode(); + BatchNode batchNode = new BatchNode(node, keys); + BatchCommand command = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status); + BatchExecutor.Execute(command, status); + return existsArray; + } + + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchExistsArrayCommand(cluster, batchNode, policy, keys, existsArray, status); + } + BatchExecutor.Execute(cluster, policy, commands, status); + return existsArray; + } + catch (Exception e) + { + throw new AerospikeException.BatchExists(existsArray, e); + } + } + + //------------------------------------------------------- + // Read Record Operations + //------------------------------------------------------- + + /// + /// Read entire record for specified key. + /// If found, return record instance. If not found, return null. + /// The policy can be used to specify timeouts. + /// + /// generic configuration parameters, pass in null for defaults + /// unique record identifier + /// if read fails + public Record Get(Policy policy, Key key) + { + if (policy == null) + { + policy = readPolicyDefault; + } + + policy.Txn?.SetNamespace(key.ns); + + ReadCommand command = new ReadCommand(cluster, policy, key); + command.Execute(); + return command.Record; + } + + /// + /// Read record header and bins for specified key. + /// If found, return record instance. If not found, return null. + /// The policy can be used to specify timeouts. 
+ /// + /// generic configuration parameters, pass in null for defaults + /// unique record identifier + /// bins to retrieve + /// if read fails + public Record Get(Policy policy, Key key, params string[] binNames) + { + if (policy == null) + { + policy = readPolicyDefault; + } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(key.ns); + } + + ReadCommand command = new ReadCommand(cluster, policy, key, binNames); + command.Execute(); + return command.Record; + } + + /// + /// Read record generation and expiration only for specified key. Bins are not read. + /// If found, return record instance. If not found, return null. + /// The policy can be used to specify timeouts. + /// + /// generic configuration parameters, pass in null for defaults + /// unique record identifier + /// if read fails + public Record GetHeader(Policy policy, Key key) + { + if (policy == null) + { + policy = readPolicyDefault; + } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(key.ns); + } + + ReadHeaderCommand command = new ReadHeaderCommand(cluster, policy, key); + command.Execute(); + return command.Record; + } + + //------------------------------------------------------- + // Batch Read Operations + //------------------------------------------------------- + + /// + /// Read multiple records for specified batch keys in one batch call. + /// This method allows different namespaces/bins to be requested for each key in the batch. + /// The returned records are located in the same list. + /// If the BatchRead key field is not found, the corresponding record field will be null. + /// + /// batch configuration parameters, pass in null for defaults + /// list of unique record identifiers and the bins to retrieve. + /// The returned records are located in the same list. 
+ /// true if all batch key requests succeeded + /// if read fails + public bool Get(BatchPolicy policy, List records) + { + if (records.Count == 0) + { + return true; + } + + if (policy == null) + { + policy = batchPolicyDefault; + } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(records); + } + + BatchStatus status = new BatchStatus(true); + List batchNodes = BatchNode.GenerateList(cluster, policy, records, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchReadListCommand(cluster, batchNode, policy, records, status); + } + BatchExecutor.Execute(cluster, policy, commands, status); + return status.GetStatus(); + } + + /// + /// Read multiple records for specified keys in one batch call. + /// The returned records are in positional order with the original key array order. + /// If a key is not found, the positional record will be null. + /// + /// batch configuration parameters, pass in null for defaults + /// array of unique record identifiers + /// which contains results for keys that did complete + public Record[] Get(BatchPolicy policy, Key[] keys) + { + if (keys.Length == 0) + { + return new Record[0]; + } + + if (policy == null) + { + policy = batchPolicyDefault; + } + + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } + + + Record[] records = new Record[keys.Length]; + + try + { + BatchStatus status = new BatchStatus(false); + + if (policy.allowProleReads) + { + // Send all requests to a single random node. 
+ Node node = cluster.GetRandomNode(); + BatchNode batchNode = new BatchNode(node, keys); + BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_GET_ALL, false, status); + BatchExecutor.Execute(command, status); + return records; + } + + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_GET_ALL, false, status); + } + BatchExecutor.Execute(cluster, policy, commands, status); + return records; + } + catch (Exception e) + { + throw new AerospikeException.BatchRecords(records, e); + } + } + + /// + /// Read multiple record headers and bins for specified keys in one batch call. + /// The returned records are in positional order with the original key array order. + /// If a key is not found, the positional record will be null. + /// + /// batch configuration parameters, pass in null for defaults + /// array of unique record identifiers + /// array of bins to retrieve + /// which contains results for keys that did complete + public Record[] Get(BatchPolicy policy, Key[] keys, params string[] binNames) + { + if (keys.Length == 0) + { + return new Record[0]; + } + + if (policy == null) + { + policy = batchPolicyDefault; + } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } + + Record[] records = new Record[keys.Length]; + + try + { + BatchStatus status = new BatchStatus(false); + + if (policy.allowProleReads) + { + // Send all requests to a single random node. 
+ Node node = cluster.GetRandomNode(); + BatchNode batchNode = new BatchNode(node, keys); + BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, null, records, Command.INFO1_READ, false, status); + BatchExecutor.Execute(command, status); + return records; + } + + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, binNames, null, records, Command.INFO1_READ, false, status); + } + BatchExecutor.Execute(cluster, policy, commands, status); + return records; + } + catch (Exception e) + { + throw new AerospikeException.BatchRecords(records, e); + } + } + + /// + /// Read multiple records for specified keys using read operations in one batch call. + /// The returned records are in positional order with the original key array order. + /// If a key is not found, the positional record will be null. + /// + /// batch configuration parameters, pass in null for defaults + /// array of unique record identifiers + /// array of read operations on record + /// which contains results for keys that did complete + public Record[] Get(BatchPolicy policy, Key[] keys, params Operation[] ops) + { + if (keys.Length == 0) + { + return new Record[0]; + } + + if (policy == null) + { + policy = batchPolicyDefault; + } + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } + + Record[] records = new Record[keys.Length]; + + try + { + BatchStatus status = new BatchStatus(false); + + if (policy.allowProleReads) + { + // Send all requests to a single random node. 
+ Node node = cluster.GetRandomNode(); + BatchNode batchNode = new BatchNode(node, keys); + BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, ops, records, Command.INFO1_READ, true, status); + BatchExecutor.Execute(command, status); + return records; + } + + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, ops, records, Command.INFO1_READ, true, status); + } + BatchExecutor.Execute(cluster, policy, commands, status); + return records; + } + catch (Exception e) + { + throw new AerospikeException.BatchRecords(records, e); + } + } + + /// + /// Read multiple record header data for specified keys in one batch call. + /// The returned records are in positional order with the original key array order. + /// If a key is not found, the positional record will be null. + /// + /// batch configuration parameters, pass in null for defaults + /// array of unique record identifiers + /// which contains results for keys that did complete + public Record[] GetHeader(BatchPolicy policy, Key[] keys) + { + if (keys.Length == 0) + { + return new Record[0]; + } + + if (policy == null) + { + policy = batchPolicyDefault; + } + + + if (policy.Txn != null) + { + policy.Txn.SetNamespace(keys); + } + + + Record[] records = new Record[keys.Length]; + + try + { + BatchStatus status = new BatchStatus(false); + + if (policy.allowProleReads) + { + // Send all requests to a single random node. 
+ Node node = cluster.GetRandomNode(); + BatchNode batchNode = new BatchNode(node, keys); + BatchCommand command = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_NOBINDATA, false, status); + BatchExecutor.Execute(command, status); + return records; + } + + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, null, false, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchGetArrayCommand(cluster, batchNode, policy, keys, null, null, records, Command.INFO1_READ | Command.INFO1_NOBINDATA, false, status); + } + BatchExecutor.Execute(cluster, policy, commands, status); + return records; + } + catch (Exception e) + { + throw new AerospikeException.BatchRecords(records, e); + } + } + + //------------------------------------------------------- + // Join methods + //------------------------------------------------------- + + /// + /// Read specified bins in left record and then join with right records. Each join bin name + /// (Join.leftKeysBinName) must exist in the left record. The join bin must contain a list of + /// keys. Those key are used to retrieve other records using a separate batch get. 
+ /// + /// generic configuration parameters, pass in null for defaults + /// unique main record identifier + /// array of bins to retrieve + /// array of join definitions + /// if main read or join reads fail + public Record Join(BatchPolicy policy, Key key, string[] binNames, params Join[] joins) + { + string[] names = new string[binNames.Length + joins.Length]; + int count = 0; + + foreach (string binName in binNames) + { + names[count++] = binName; + } + + foreach (Join join in joins) + { + names[count++] = join.leftKeysBinName; + } + Record record = Get(policy, key, names); + JoinRecords(policy, record, joins); + return record; + } + + /// + /// Read all bins in left record and then join with right records. Each join bin name + /// (Join.binNameKeys) must exist in the left record. The join bin must contain a list of + /// keys. Those key are used to retrieve other records using a separate batch get. + /// + /// generic configuration parameters, pass in null for defaults + /// unique main record identifier + /// array of join definitions + /// if main read or join reads fail + public Record Join(BatchPolicy policy, Key key, params Join[] joins) + { + Record record = Get(policy, key); + JoinRecords(policy, record, joins); + return record; + } + + //------------------------------------------------------- + // Generic Database Operations + //------------------------------------------------------- + + /// + /// Perform multiple read/write operations on a single key in one batch call. + /// An example would be to add an integer value to an existing record and then + /// read the result, all in one database call. + /// + /// The server executes operations in the same order as the operations array. + /// Both scalar bin operations (Operation) and CDT bin operations (ListOperation, + /// MapOperation) can be performed in same call. 
+ /// + /// + /// write configuration parameters, pass in null for defaults + /// unique record identifier + /// database operations to perform + /// if command fails + public Record Operate(WritePolicy policy, Key key, params Operation[] operations) + { + OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations); + + if (args.hasWrite) + { + policy = args.writePolicy; + + if (policy.Txn != null) + { + TxnMonitor.AddKey(cluster, policy, key); + } + + OperateCommandWrite command = new(cluster, key, args); + command.Execute(); + return command.Record; + } + else + { + if (policy?.Txn != null) + { + policy.Txn.SetNamespace(key.ns); + } + + OperateCommandRead command = new(cluster, key, args); + command.Execute(); + return command.Record; + } + } + + //------------------------------------------------------- + // Batch Read/Write Operations + //------------------------------------------------------- + + /// + /// Read/Write multiple records for specified batch keys in one batch call. + /// This method allows different namespaces/bins for each key in the batch. + /// The returned records are located in the same list. + /// + /// can be , , or + /// . 
+ /// + /// + /// Requires server version 6.0+ + /// + /// + /// batch configuration parameters, pass in null for defaults + /// list of unique record identifiers and read/write operations + /// true if all batch sub-commands succeeded + /// if command fails + public bool Operate(BatchPolicy policy, List records) + { + if (records.Count == 0) + { + return true; + } + + if (policy == null) + { + policy = batchParentPolicyWriteDefault; + } + + if (policy.Txn != null) + { + TxnMonitor.AddKeys(cluster, policy, records); + } + + BatchStatus status = new BatchStatus(true); + List batchNodes = BatchNode.GenerateList(cluster, policy, records, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchOperateListCommand(cluster, batchNode, policy, records, status); + } + BatchExecutor.Execute(cluster, policy, commands, status); + return status.GetStatus(); + } + + /// + /// Perform read/write operations on multiple keys. If a key is not found, the corresponding result + /// will be . + /// + /// Requires server version 6.0+ + /// + /// + /// batch configuration parameters, pass in null for defaults + /// write configuration parameters, pass in null for defaults + /// array of unique record identifiers + /// + /// read/write operations to perform. is not allowed because it returns a + /// variable number of bins and makes it difficult (sometimes impossible) to lineup operations with + /// results. Instead, use for each bin name. 
+ /// + /// which contains results for keys that did complete + public BatchResults Operate(BatchPolicy batchPolicy, BatchWritePolicy writePolicy, Key[] keys, params Operation[] ops) + { + if (keys.Length == 0) + { + return new BatchResults(new BatchRecord[0], true); + } + + if (batchPolicy == null) + { + batchPolicy = batchParentPolicyWriteDefault; + } + + if (writePolicy == null) + { + writePolicy = batchWritePolicyDefault; + } + + if (batchPolicy.Txn != null) + { + TxnMonitor.AddKeys(cluster, batchPolicy, keys); + } + + BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops); + BatchRecord[] records = new BatchRecord[keys.Length]; + + for (int i = 0; i < keys.Length; i++) + { + records[i] = new BatchRecord(keys[i], attr.hasWrite); + } + + try + { + BatchStatus status = new BatchStatus(true); + List batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status); + BatchCommand[] commands = new BatchCommand[batchNodes.Count]; + int count = 0; + + foreach (BatchNode batchNode in batchNodes) + { + commands[count++] = new BatchOperateArrayCommand(cluster, batchNode, batchPolicy, keys, ops, records, attr, status); + } + + BatchExecutor.Execute(cluster, batchPolicy, commands, status); + return new BatchResults(records, status.GetStatus()); + } + catch (Exception e) + { + throw new AerospikeException.BatchRecordArray(records, e); + } + } + + //------------------------------------------------------- + // Scan Operations + //------------------------------------------------------- + + /// + /// Read all records in specified namespace and set. If the policy's + /// concurrentNodes is specified, each server node will be read in + /// parallel. Otherwise, server nodes are read in series. + /// + /// This call will block until the scan is complete - callbacks are made + /// within the scope of this call. 
/// <summary>
/// Read all records in specified namespace and set. If the policy's
/// <code>concurrentNodes</code> is specified, each server node will be read in
/// parallel. Otherwise, server nodes are read in series.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanAll(ScanPolicy policy, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	if (policy == null)
	{
		policy = scanPolicyDefault;
	}

	Node[] nodes = cluster.ValidateNodes();
	PartitionTracker tracker = new PartitionTracker(policy, nodes);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

/// <summary>
/// Read all records in specified namespace and set for one node only.
/// The node is specified by name.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="nodeName">server node name</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanNode(ScanPolicy policy, string nodeName, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	Node node = cluster.GetNode(nodeName);
	ScanNode(policy, node, ns, setName, callback, binNames);
}

/// <summary>
/// Read all records in specified namespace and set for one node only.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="node">server node</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanNode(ScanPolicy policy, Node node, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	if (policy == null)
	{
		policy = scanPolicyDefault;
	}

	PartitionTracker tracker = new PartitionTracker(policy, node);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}

/// <summary>
/// Read records in specified namespace, set and partition filter.
/// <para>
/// This call will block until the scan is complete - callbacks are made
/// within the scope of this call.
/// </para>
/// </summary>
/// <param name="policy">scan configuration parameters, pass in null for defaults</param>
/// <param name="partitionFilter">filter on a subset of data partitions</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="callback">read callback method - called with record data</param>
/// <param name="binNames">optional bin to retrieve. All bins will be returned if not specified.</param>
/// <exception cref="AerospikeException">if scan fails</exception>
public void ScanPartitions(ScanPolicy policy, PartitionFilter partitionFilter, string ns, string setName, ScanCallback callback, params string[] binNames)
{
	if (policy == null)
	{
		policy = scanPolicyDefault;
	}

	Node[] nodes = cluster.ValidateNodes();
	PartitionTracker tracker = new PartitionTracker(policy, nodes, partitionFilter);
	ScanExecutor.ScanPartitions(cluster, policy, ns, setName, binNames, callback, tracker);
}
/// <summary>
/// Register package located in a file containing user defined functions with server.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="clientPath">path of client file containing user defined functions, relative to current directory</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask Register(Policy policy, string clientPath, string serverPath, Language language)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	string content = Util.ReadFileEncodeBase64(clientPath);
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}

/// <summary>
/// Register package located in a resource containing user defined functions with server.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="resourceAssembly">assembly where resource is located. Current assembly can be obtained by: Assembly.GetExecutingAssembly()</param>
/// <param name="resourcePath">namespace path where Lua resource is located. Example: Aerospike.Client.Resources.mypackage.lua</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask Register(Policy policy, Assembly resourceAssembly, string resourcePath, string serverPath, Language language)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	string content;
	using (Stream stream = resourceAssembly.GetManifestResourceStream(resourcePath))
	{
		byte[] bytes = new byte[stream.Length];
		int offset = 0;

		// Stream.Read may return fewer bytes than requested. Loop until the entire
		// resource is consumed instead of trusting a single Read() call.
		while (offset < bytes.Length)
		{
			int read = stream.Read(bytes, offset, bytes.Length - offset);

			if (read <= 0)
			{
				throw new AerospikeException("Failed to read resource: " + resourcePath);
			}
			offset += read;
		}
		content = Convert.ToBase64String(bytes);
	}
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}

/// <summary>
/// Register UDF functions located in a code string with server. Example:
/// <code>
/// String code = @"
/// local function reducer(val1,val2)
///    return val1 + val2
/// end
///
/// function sum_single_bin(stream,name)
///   local function mapper(rec)
///     return rec[name]
///   end
///   return stream : map(mapper) : reduce(reducer)
/// end
/// ";
///
/// client.RegisterUdfString(null, code, "mysum.lua", Language.LUA);
/// </code>
/// <para>
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// RegisterTask instance.
/// </para>
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="code">code string containing user defined functions</param>
/// <param name="serverPath">path to store user defined functions on the server, relative to configured script directory.</param>
/// <param name="language">language of user defined functions</param>
/// <exception cref="AerospikeException">if register fails</exception>
public RegisterTask RegisterUdfString(Policy policy, string code, string serverPath, Language language)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	byte[] bytes = ByteUtil.StringToUtf8(code);
	string content = Convert.ToBase64String(bytes);
	return RegisterCommand.Register(cluster, policy, content, serverPath, language);
}
/// <summary>
/// Remove user defined function from server nodes.
/// </summary>
/// <param name="policy">info configuration parameters, pass in null for defaults</param>
/// <param name="serverPath">location of UDF on server nodes. Example: mylua.lua</param>
/// <exception cref="AerospikeException">if remove fails</exception>
public void RemoveUdf(InfoPolicy policy, string serverPath)
{
	if (policy == null)
	{
		policy = infoPolicyDefault;
	}
	// Send UDF command to one node. That node will distribute the UDF command to other nodes.
	string command = "udf-remove:filename=" + serverPath;
	Node node = cluster.GetRandomNode();
	string response = Info.Request(policy, node, command);

	// Protocol tokens are not localized text; compare ordinally.
	if (response.Equals("ok", StringComparison.OrdinalIgnoreCase))
	{
		return;
	}

	if (response.StartsWith("error=file_not_found", StringComparison.Ordinal))
	{
		// UDF has already been removed.
		return;
	}
	throw new AerospikeException("Remove UDF failed: " + response);
}

/// <summary>
/// Execute user defined function on server and return results.
/// The function operates on a single record.
/// The package name is used to locate the udf file location:
/// <para>udf file = &lt;server udf dir&gt;/&lt;package name&gt;.lua</para>
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="key">unique record identifier</param>
/// <param name="packageName">server package name where user defined function resides</param>
/// <param name="functionName">user defined function</param>
/// <param name="args">arguments passed in to user defined function</param>
/// <exception cref="AerospikeException">if command fails</exception>
public object Execute(WritePolicy policy, Key key, string packageName, string functionName, params Value[] args)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}

	// Register transaction key with the MRT monitor before issuing the command.
	if (policy.Txn != null)
	{
		TxnMonitor.AddKey(cluster, policy, key);
	}

	ExecuteCommand command = new ExecuteCommand(cluster, policy, key, packageName, functionName, args);
	command.Execute();

	Record record = command.Record;

	if (record == null || record.bins == null)
	{
		return null;
	}

	// The UDF result is returned in a single-entry bin map keyed by outcome.
	IDictionary<string, object> map = record.bins;
	object obj;

	if (map.TryGetValue("SUCCESS", out obj))
	{
		return obj;
	}

	if (map.TryGetValue("FAILURE", out obj))
	{
		throw new AerospikeException(obj.ToString());
	}
	throw new AerospikeException("Invalid UDF return value");
}

/// <summary>
/// Execute user defined function on server for each key and return results.
/// The package name is used to locate the udf file location:
/// <para>udf file = &lt;server udf dir&gt;/&lt;package name&gt;.lua</para>
/// <para>Requires server version 6.0+</para>
/// </summary>
/// <param name="batchPolicy">batch configuration parameters, pass in null for defaults</param>
/// <param name="udfPolicy">udf configuration parameters, pass in null for defaults</param>
/// <param name="keys">array of unique record identifiers</param>
/// <param name="packageName">server package name where user defined function resides</param>
/// <param name="functionName">user defined function</param>
/// <param name="functionArgs">arguments passed in to user defined function</param>
/// <returns>batch results for keys that did complete</returns>
/// <exception cref="AerospikeException.BatchRecordArray">which contains results for keys that did complete</exception>
public BatchResults Execute(BatchPolicy batchPolicy, BatchUDFPolicy udfPolicy, Key[] keys, string packageName, string functionName, params Value[] functionArgs)
{
	if (keys.Length == 0)
	{
		return new BatchResults(new BatchRecord[0], true);
	}

	if (batchPolicy == null)
	{
		batchPolicy = batchParentPolicyWriteDefault;
	}

	if (udfPolicy == null)
	{
		udfPolicy = batchUDFPolicyDefault;
	}

	// Register transaction keys with the MRT monitor before issuing the batch.
	if (batchPolicy.Txn != null)
	{
		TxnMonitor.AddKeys(cluster, batchPolicy, keys);
	}

	byte[] argBytes = Packer.Pack(functionArgs);

	BatchAttr attr = new BatchAttr();
	attr.SetUDF(udfPolicy);

	BatchRecord[] records = new BatchRecord[keys.Length];

	for (int i = 0; i < keys.Length; i++)
	{
		records[i] = new BatchRecord(keys[i], attr.hasWrite);
	}

	try
	{
		BatchStatus status = new BatchStatus(true);
		List<BatchNode> batchNodes = BatchNode.GenerateList(cluster, batchPolicy, keys, records, attr.hasWrite, status);
		BatchCommand[] commands = new BatchCommand[batchNodes.Count];
		int count = 0;

		foreach (BatchNode batchNode in batchNodes)
		{
			commands[count++] = new BatchUDFCommand(cluster, batchNode, batchPolicy, keys, packageName, functionName, argBytes, records, attr, status);
		}

		BatchExecutor.Execute(cluster, batchPolicy, commands, status);
		return new BatchResults(records, status.GetStatus());
	}
	catch (Exception e)
	{
		// Batch terminated on fatal error; surface partial results to the caller.
		throw new AerospikeException.BatchRecordArray(records, e);
	}
}

//----------------------------------------------------------
// Query/Execute
//----------------------------------------------------------

/// <summary>
/// Apply user defined function on records that match the background query statement filter.
/// Records are not returned to the client.
/// This asynchronous server call will return before the command is complete.
/// The user can optionally wait for command completion by using the returned
/// ExecuteTask instance.
/// </summary>
/// <param name="policy">configuration parameters, pass in null for defaults</param>
/// <param name="statement">background query definition</param>
/// <param name="packageName">server package where user defined function resides</param>
/// <param name="functionName">function name</param>
/// <param name="functionArgs">to pass to function name, if any</param>
/// <exception cref="AerospikeException">if command fails</exception>
public ExecuteTask Execute(WritePolicy policy, Statement statement, string packageName, string functionName, params Value[] functionArgs)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}

	statement.PackageName = packageName;
	statement.FunctionName = functionName;
	statement.FunctionArgs = functionArgs;

	cluster.AddCommandCount();

	ulong taskId = statement.PrepareTaskId();
	Node[] nodes = cluster.ValidateNodes();
	Executor executor = new Executor(nodes.Length);

	foreach (Node node in nodes)
	{
		ServerCommand command = new ServerCommand(cluster, node, policy, statement, taskId);
		executor.AddCommand(command);
	}

	executor.Execute(nodes.Length);
	return new ExecuteTask(cluster, policy, statement, taskId);
}
/// <summary>
/// Apply operations on records that match the background query statement filter.
/// Records are not returned to the client.
/// This asynchronous server call will return before the command is complete.
/// The user can optionally wait for command completion by using the returned
/// ExecuteTask instance.
/// </summary>
/// <param name="policy">write configuration parameters, pass in null for defaults</param>
/// <param name="statement">background query definition</param>
/// <param name="operations">list of operations to be performed on selected records</param>
/// <exception cref="AerospikeException">if command fails</exception>
public ExecuteTask Execute(WritePolicy policy, Statement statement, params Operation[] operations)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}

	if (operations.Length > 0)
	{
		statement.Operations = operations;
	}

	cluster.AddCommandCount();

	ulong taskId = statement.PrepareTaskId();
	Node[] nodes = cluster.ValidateNodes();
	Executor executor = new Executor(nodes.Length);

	foreach (Node node in nodes)
	{
		ServerCommand command = new ServerCommand(cluster, node, policy, statement, taskId);
		executor.AddCommand(command);
	}
	executor.Execute(nodes.Length);
	return new ExecuteTask(cluster, policy, statement, taskId);
}

//--------------------------------------------------------
// Query functions
//--------------------------------------------------------

/// <summary>
/// Execute query and call action for each record returned from server.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="action">action methods to be called for each record</param>
/// <exception cref="AerospikeException">if query fails</exception>
public void Query(QueryPolicy policy, Statement statement, Action<Key, Record> action)
{
	using (RecordSet rs = Query(policy, statement))
	{
		while (rs.Next())
		{
			action(rs.Key, rs.Record);
		}
	}
}

/// <summary>
/// Execute query and return record iterator. The query executor puts records on a queue in
/// separate threads. The calling thread concurrently pops records off the queue through the
/// record iterator.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <exception cref="AerospikeException">if query fails</exception>
public RecordSet Query(QueryPolicy policy, Statement statement)
{
	if (policy == null)
	{
		policy = queryPolicyDefault;
	}

	Node[] nodes = cluster.ValidateNodes();

	// Partition queries require server support; fall back to the legacy
	// per-node record executor for secondary index queries on old servers.
	if (cluster.hasPartitionQuery || statement.filter == null)
	{
		PartitionTracker tracker = new PartitionTracker(policy, statement, nodes);
		QueryPartitionExecutor executor = new QueryPartitionExecutor(cluster, policy, statement, nodes.Length, tracker);
		return executor.RecordSet;
	}
	else
	{
		QueryRecordExecutor executor = new QueryRecordExecutor(cluster, policy, statement, nodes);
		executor.Execute();
		return executor.RecordSet;
	}
}
/// <summary>
/// Execute query on all server nodes and return records via the listener. This method will
/// block until the query is complete. Listener callbacks are made within the scope of this call.
/// <para>
/// If <see cref="QueryPolicy.maxConcurrentNodes"/> is not 1, the supplied listener must handle
/// shared data in a thread-safe manner, because the listener will be called by multiple query
/// threads (one thread per node) in parallel.
/// </para>
/// <para>Requires server version 6.0+ if using a secondary index query.</para>
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="listener">where to send results</param>
/// <exception cref="AerospikeException">if query fails</exception>
public void Query(QueryPolicy policy, Statement statement, QueryListener listener)
{
	if (policy == null)
	{
		policy = queryPolicyDefault;
	}

	Node[] nodes = cluster.ValidateNodes();

	if (cluster.hasPartitionQuery || statement.filter == null)
	{
		PartitionTracker tracker = new PartitionTracker(policy, statement, nodes);
		QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker);
	}
	else
	{
		throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported");
	}
}

/// <summary>
/// Execute query for specified partitions and return records via the listener. This method will
/// block until the query is complete. Listener callbacks are made within the scope of this call.
/// <para>
/// If <see cref="QueryPolicy.maxConcurrentNodes"/> is not 1, the supplied listener must handle
/// shared data in a thread-safe manner, because the listener will be called by multiple query
/// threads (one thread per node) in parallel.
/// </para>
/// <para>
/// The completion status of all partitions is stored in the partitionFilter when the query terminates.
/// This partitionFilter can then be used to resume an incomplete query at a later time.
/// This is the preferred method for query terminate/resume functionality.
/// </para>
/// <para>Requires server version 6.0+ if using a secondary index query.</para>
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="partitionFilter">
/// data partition filter. Set to <see cref="PartitionFilter.All()"/> for all partitions.
/// </param>
/// <param name="listener">where to send results</param>
/// <exception cref="AerospikeException">if query fails</exception>
public void Query
(
	QueryPolicy policy,
	Statement statement,
	PartitionFilter partitionFilter,
	QueryListener listener
)
{
	if (policy == null)
	{
		policy = queryPolicyDefault;
	}

	Node[] nodes = cluster.ValidateNodes();

	if (cluster.hasPartitionQuery || statement.filter == null)
	{
		PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter);
		QueryListenerExecutor.execute(cluster, policy, statement, listener, tracker);
	}
	else
	{
		throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Query by partition is not supported");
	}
}

/// <summary>
/// Execute query for specified partitions and return record iterator. The query executor puts
/// records on a queue in separate threads. The calling thread concurrently pops records off
/// the queue through the record iterator.
/// <para>Requires server version 6.0+ if using a secondary index query.</para>
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="partitionFilter">filter on a subset of data partitions</param>
/// <exception cref="AerospikeException">if query fails</exception>
public RecordSet QueryPartitions
(
	QueryPolicy policy,
	Statement statement,
	PartitionFilter partitionFilter
)
{
	if (policy == null)
	{
		policy = queryPolicyDefault;
	}

	Node[] nodes = cluster.ValidateNodes();

	if (cluster.hasPartitionQuery || statement.filter == null)
	{
		PartitionTracker tracker = new PartitionTracker(policy, statement, nodes, partitionFilter);
		QueryPartitionExecutor executor = new QueryPartitionExecutor(cluster, policy, statement, nodes.Length, tracker);
		return executor.RecordSet;
	}
	else
	{
		throw new AerospikeException(ResultCode.PARAMETER_ERROR, "QueryPartitions() not supported");
	}
}
/// <summary>
/// Execute query, apply statement's aggregation function, and return result iterator.
/// The aggregation function should be located in a Lua script file that can be found from the
/// "LuaConfig.PackagePath" paths static variable. The default package path is "udf/?.lua"
/// where "?" is the packageName.
/// <para>
/// The query executor puts results on a queue in separate threads. The calling thread
/// concurrently pops results off the queue through the ResultSet iterator.
/// The aggregation function is called on both server and client (final reduce).
/// Therefore, the Lua script file must also reside on both server and client.
/// </para>
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">query definition</param>
/// <param name="packageName">server package where user defined function resides</param>
/// <param name="functionName">aggregation function name</param>
/// <param name="functionArgs">arguments to pass to function name, if any</param>
/// <exception cref="AerospikeException">if query fails</exception>
public ResultSet QueryAggregate
(
	QueryPolicy policy,
	Statement statement,
	string packageName,
	string functionName,
	params Value[] functionArgs
)
{
	statement.SetAggregateFunction(packageName, functionName, functionArgs);
	return QueryAggregate(policy, statement);
}

/// <summary>
/// Execute query, apply statement's aggregation function, call action for each aggregation
/// object returned from server.
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">
/// query definition with aggregate functions already initialized by SetAggregateFunction().
/// </param>
/// <param name="action">action methods to be called for each aggregation object</param>
/// <exception cref="AerospikeException">if query fails</exception>
public void QueryAggregate(QueryPolicy policy, Statement statement, Action<object> action)
{
	using (ResultSet rs = QueryAggregate(policy, statement))
	{
		while (rs.Next())
		{
			action(rs.Object);
		}
	}
}

/// <summary>
/// Execute query, apply statement's aggregation function, and return result iterator.
/// The aggregation function should be initialized via the statement's SetAggregateFunction()
/// and should be located in a Lua resource file located in an assembly.
/// <para>
/// The query executor puts results on a queue in separate threads. The calling thread
/// concurrently pops results off the queue through the ResultSet iterator.
/// The aggregation function is called on both server and client (final reduce).
/// Therefore, the Lua script file must also reside on both server and client.
/// </para>
/// </summary>
/// <param name="policy">query configuration parameters, pass in null for defaults</param>
/// <param name="statement">
/// query definition with aggregate functions already initialized by SetAggregateFunction().
/// </param>
/// <exception cref="AerospikeException">if query fails</exception>
public ResultSet QueryAggregate(QueryPolicy policy, Statement statement)
{
	if (policy == null)
	{
		policy = queryPolicyDefault;
	}

	Node[] nodes = cluster.ValidateNodes();
	QueryAggregateExecutor executor = new QueryAggregateExecutor(cluster, policy, statement, nodes);
	executor.Execute();
	return executor.ResultSet;
}

//--------------------------------------------------------
// Secondary Index functions
//--------------------------------------------------------

/// <summary>
/// Create scalar secondary index.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// IndexTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="indexName">name of secondary index</param>
/// <param name="binName">bin name that data is indexed on</param>
/// <param name="indexType">underlying data type of secondary index</param>
/// <exception cref="AerospikeException">if index create fails</exception>
public IndexTask CreateIndex
(
	Policy policy,
	string ns,
	string setName,
	string indexName,
	string binName,
	IndexType indexType
)
{
	// Delegate to the full overload with a default collection type and no CDT context.
	return CreateIndex(policy, ns, setName, indexName, binName, indexType, IndexCollectionType.DEFAULT);
}
/// <summary>
/// Create complex secondary index on bins containing collections.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// IndexTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="indexName">name of secondary index</param>
/// <param name="binName">bin name that data is indexed on</param>
/// <param name="indexType">underlying data type of secondary index</param>
/// <param name="indexCollectionType">index collection type</param>
/// <param name="ctx">optional context to index on elements within a CDT</param>
/// <exception cref="AerospikeException">if index create fails</exception>
public IndexTask CreateIndex
(
	Policy policy,
	string ns,
	string setName,
	string indexName,
	string binName,
	IndexType indexType,
	IndexCollectionType indexCollectionType,
	params CTX[] ctx
)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}

	StringBuilder sb = new StringBuilder(1024);
	sb.Append("sindex-create:ns=");
	sb.Append(ns);

	if (setName != null && setName.Length > 0)
	{
		sb.Append(";set=");
		sb.Append(setName);
	}

	sb.Append(";indexname=");
	sb.Append(indexName);

	if (ctx != null && ctx.Length > 0)
	{
		// CDT context is transmitted as base64-encoded msgpack.
		byte[] bytes = PackUtil.Pack(ctx);
		string base64 = Convert.ToBase64String(bytes);

		sb.Append(";context=");
		sb.Append(base64);
	}

	if (indexCollectionType != IndexCollectionType.DEFAULT)
	{
		sb.Append(";indextype=");
		sb.Append(indexCollectionType);
	}

	sb.Append(";indexdata=");
	sb.Append(binName);
	sb.Append(',');
	sb.Append(indexType);

	// Send index command to one node. That node will distribute the command to other nodes.
	string response = SendInfoCommand(policy, sb.ToString());

	if (response.Equals("OK", StringComparison.OrdinalIgnoreCase))
	{
		// Return task that could optionally be polled for completion.
		return new IndexTask(cluster, policy, ns, indexName, true);
	}

	ParseInfoError("Create index failed", response);
	return null;
}

/// <summary>
/// Delete secondary index.
/// This asynchronous server call will return before command is complete.
/// The user can optionally wait for command completion by using the returned
/// IndexTask instance.
/// </summary>
/// <param name="policy">generic configuration parameters, pass in null for defaults</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="setName">optional set name - equivalent to database table</param>
/// <param name="indexName">name of secondary index</param>
/// <exception cref="AerospikeException">if index drop fails</exception>
public IndexTask DropIndex(Policy policy, string ns, string setName, string indexName)
{
	if (policy == null)
	{
		policy = writePolicyDefault;
	}
	StringBuilder sb = new StringBuilder(500);
	sb.Append("sindex-delete:ns=");
	sb.Append(ns);

	if (setName != null && setName.Length > 0)
	{
		sb.Append(";set=");
		sb.Append(setName);
	}
	sb.Append(";indexname=");
	sb.Append(indexName);

	// Send index command to one node. That node will distribute the command to other nodes.
	string response = SendInfoCommand(policy, sb.ToString());

	if (response.Equals("OK", StringComparison.OrdinalIgnoreCase))
	{
		return new IndexTask(cluster, policy, ns, indexName, false);
	}

	ParseInfoError("Drop index failed", response);
	return null;
}

//-----------------------------------------------------------------
// XDR - Cross datacenter replication
//-----------------------------------------------------------------

/// <summary>
/// Set XDR filter for given datacenter name and namespace. The expression filter indicates
/// which records XDR should ship to the datacenter.
/// </summary>
/// <param name="policy">info configuration parameters, pass in null for defaults</param>
/// <param name="datacenter">XDR datacenter name</param>
/// <param name="ns">namespace - equivalent to database name</param>
/// <param name="filter">expression filter</param>
/// <exception cref="AerospikeException">if command fails</exception>
public void SetXDRFilter(InfoPolicy policy, string datacenter, string ns, Expression filter)
{
	if (policy == null)
	{
		policy = infoPolicyDefault;
	}

	// Send XDR command to one node. That node will distribute the XDR command to other nodes.
	string command = "xdr-set-filter:dc=" + datacenter + ";namespace=" + ns + ";exp=" + filter.GetBase64();
	Node node = cluster.GetRandomNode();
	string response = Info.Request(policy, node, command);

	if (response.Equals("ok", StringComparison.OrdinalIgnoreCase))
	{
		return;
	}

	ParseInfoError("xdr-set-filter failed", response);
}

//-------------------------------------------------------
// User administration
//-------------------------------------------------------

/// <summary>
/// Create user with password and roles. Clear-text password will be hashed using bcrypt
/// before sending to server.
/// </summary>
/// <param name="policy">admin configuration parameters, pass in null for defaults</param>
/// <param name="user">user name</param>
/// <param name="password">user password in clear-text format</param>
/// <param name="roles">variable arguments array of role names. Predefined roles are listed in Role.cs</param>
public void CreateUser(AdminPolicy policy, string user, string password, IList<string> roles)
{
	string hash = AdminCommand.HashPassword(password);
	AdminCommand command = new AdminCommand();
	command.CreateUser(cluster, policy, user, hash, roles);
}

/// <summary>
/// Remove user from cluster.
/// </summary>
/// <param name="policy">admin configuration parameters, pass in null for defaults</param>
/// <param name="user">user name</param>
public void DropUser(AdminPolicy policy, string user)
{
	AdminCommand command = new AdminCommand();
	command.DropUser(cluster, policy, user);
}
/// <summary>
/// Change user's password.
/// </summary>
/// <param name="policy">admin configuration parameters, pass in null for defaults</param>
/// <param name="user">user name</param>
/// <param name="password">user password in clear-text format</param>
public void ChangePassword(AdminPolicy policy, string user, string password)
{
	if (cluster.user == null)
	{
		throw new AerospikeException("Invalid user");
	}

	byte[] userBytes = ByteUtil.StringToUtf8(user);
	byte[] passwordBytes = ByteUtil.StringToUtf8(password);

	string hash = AdminCommand.HashPassword(password);
	byte[] hashBytes = ByteUtil.StringToUtf8(hash);

	AdminCommand command = new AdminCommand();

	if (Util.ByteArrayEquals(userBytes, cluster.user))
	{
		// Change own password.
		command.ChangePassword(cluster, policy, userBytes, hash);
	}
	else
	{
		// Change other user's password by user admin.
		command.SetPassword(cluster, policy, userBytes, hash);
	}
	// Keep the cached cluster credentials in sync with the new password.
	cluster.ChangePassword(userBytes, passwordBytes, hashBytes);
}

/// <summary>
/// Add roles to user's list of roles.
/// </summary>
/// <param name="policy">admin configuration parameters, pass in null for defaults</param>
/// <param name="user">user name</param>
/// <param name="roles">role names. Predefined roles are listed in Role.cs</param>
public void GrantRoles(AdminPolicy policy, string user, IList<string> roles)
{
	AdminCommand command = new AdminCommand();
	command.GrantRoles(cluster, policy, user, roles);
}

/// <summary>
/// Remove roles from user's list of roles.
/// </summary>
/// <param name="policy">admin configuration parameters, pass in null for defaults</param>
/// <param name="user">user name</param>
/// <param name="roles">role names. Predefined roles are listed in Role.cs</param>
public void RevokeRoles(AdminPolicy policy, string user, IList<string> roles)
{
	AdminCommand command = new AdminCommand();
	command.RevokeRoles(cluster, policy, user, roles);
}

/// <summary>
/// Create user defined role.
/// </summary>
/// <param name="policy">admin configuration parameters, pass in null for defaults</param>
/// <param name="roleName">role name</param>
/// <param name="privileges">privileges assigned to the role.</param>
/// <exception cref="AerospikeException">if command fails</exception>
public void CreateRole(AdminPolicy policy, string roleName, IList<Privilege> privileges)
{
	AdminCommand command = new AdminCommand();
	command.CreateRole(cluster, policy, roleName, privileges);
}

/// <summary>
/// Create user defined role with optional privileges and whitelist.
/// </summary>
/// <param name="policy">admin configuration parameters, pass in null for defaults</param>
/// <param name="roleName">role name</param>
/// <param name="privileges">optional list of privileges assigned to role.</param>
/// <param name="whitelist">
/// optional list of allowable IP addresses assigned to role.
/// IP addresses can contain wildcards (ie. 10.1.2.0/24).
/// </param>
/// <exception cref="AerospikeException">if command fails</exception>
public void CreateRole(AdminPolicy policy, string roleName, IList<Privilege> privileges, IList<string> whitelist)
{
	AdminCommand command = new AdminCommand();
	command.CreateRole(cluster, policy, roleName, privileges, whitelist, 0, 0);
}

/// <summary>
/// Create user defined role with optional privileges, whitelist and read/write quotas.
/// Quotas require server security configuration "enable-quotas" to be set to true.
/// </summary>
/// <param name="policy">admin configuration parameters, pass in null for defaults</param>
/// <param name="roleName">role name</param>
/// <param name="privileges">optional list of privileges assigned to role.</param>
/// <param name="whitelist">
/// optional list of allowable IP addresses assigned to role.
/// IP addresses can contain wildcards (ie. 10.1.2.0/24).
/// </param>
/// <param name="readQuota">optional maximum reads per second limit, pass in zero for no limit.</param>
/// <param name="writeQuota">optional maximum writes per second limit, pass in zero for no limit.</param>
/// <exception cref="AerospikeException">if command fails</exception>
public void CreateRole
(
	AdminPolicy policy,
	string roleName,
	IList<Privilege> privileges,
	IList<string> whitelist,
	int readQuota,
	int writeQuota
)
{
	AdminCommand command = new AdminCommand();
	command.CreateRole(cluster, policy, roleName, privileges, whitelist, readQuota, writeQuota);
}
+ /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// if command fails + public void DropRole(AdminPolicy policy, string roleName) + { + AdminCommand command = new AdminCommand(); + command.DropRole(cluster, policy, roleName); + } + + /// + /// Grant privileges to an user defined role. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// privileges assigned to the role. + /// if command fails + public void GrantPrivileges(AdminPolicy policy, string roleName, IList privileges) + { + AdminCommand command = new AdminCommand(); + command.GrantPrivileges(cluster, policy, roleName, privileges); + } + + /// + /// Revoke privileges from an user defined role. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// privileges assigned to the role. + /// if command fails + public void RevokePrivileges(AdminPolicy policy, string roleName, IList privileges) + { + AdminCommand command = new AdminCommand(); + command.RevokePrivileges(cluster, policy, roleName, privileges); + } + + /// + /// Set IP address whitelist for a role. If whitelist is null or empty, remove existing whitelist from role. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// + /// list of allowable IP addresses or null. + /// IP addresses can contain wildcards (ie. 10.1.2.0/24). + /// + /// if command fails + public void SetWhitelist(AdminPolicy policy, string roleName, IList whitelist) + { + AdminCommand command = new AdminCommand(); + command.SetWhitelist(cluster, policy, roleName, whitelist); + } + + /// + /// Set maximum reads/writes per second limits for a role. If a quota is zero, the limit is removed. + /// Quotas require server security configuration "enable-quotas" to be set to true. + /// + /// admin configuration parameters, pass in null for defaults + /// role name + /// maximum reads per second limit, pass in zero for no limit. 
+ /// maximum writes per second limit, pass in zero for no limit. + /// if command fails + public void SetQuotas(AdminPolicy policy, string roleName, int readQuota, int writeQuota) + { + AdminCommand command = new AdminCommand(); + command.setQuotas(cluster, policy, roleName, readQuota, writeQuota); + } + + /// + /// Retrieve roles for a given user. + /// + /// admin configuration parameters, pass in null for defaults + /// user name filter + public User QueryUser(AdminPolicy policy, string user) + { + AdminCommand.UserCommand command = new AdminCommand.UserCommand(1); + return command.QueryUser(cluster, policy, user); + } + + /// + /// Retrieve all users and their roles. + /// + /// admin configuration parameters, pass in null for defaults + public List QueryUsers(AdminPolicy policy) + { + AdminCommand.UserCommand command = new AdminCommand.UserCommand(100); + return command.QueryUsers(cluster, policy); + } + + /// + /// Retrieve role definition. + /// + /// admin configuration parameters, pass in null for defaults + /// role name filter + /// if command fails + public Role QueryRole(AdminPolicy policy, string roleName) + { + AdminCommand.RoleCommand command = new AdminCommand.RoleCommand(1); + return command.QueryRole(cluster, policy, roleName); + } + + /// + /// Retrieve all roles. 
+ /// + /// admin configuration parameters, pass in null for defaults + /// if command fails + public List QueryRoles(AdminPolicy policy) + { + AdminCommand.RoleCommand command = new AdminCommand.RoleCommand(100); + return command.QueryRoles(cluster, policy); + } + + //------------------------------------------------------- + // Internal Methods + //------------------------------------------------------- + + private string SendInfoCommand(Policy policy, string command) + { + Node node = cluster.GetRandomNode(); + Connection conn = node.GetConnection(policy.socketTimeout); + Info info; + + try + { + info = new Info(conn, command); + node.PutConnection(conn); + } + catch (Exception) + { + node.CloseConnectionOnError(conn); + throw; + } + return info.GetValue(); + } + + private void ParseInfoError(string prefix, string response) + { + string message = prefix + ": " + response; + string[] list = response.Split(':'); + + if (list.Length >= 2 && list[0].Equals("FAIL")) + { + int code = 0; + + try + { + code = Convert.ToInt32(list[1]); + } + catch (Exception) + { + } + throw new AerospikeException(code, message); + } + throw new AerospikeException(message); + } + + private void JoinRecords(BatchPolicy policy, Record record, Join[] joins) + { + if (record == null) + { + return; + } + + foreach (Join join in joins) + { + List keyList = (List)record.GetValue(join.leftKeysBinName); + + if (keyList != null) + { + Key[] keyArray = new Key[keyList.Count]; + int count = 0; + + foreach (object obj in keyList) + { + Value value = Value.Get(obj); + keyArray[count++] = new Key(join.rightNamespace, join.rightSetName, value); + } + + Record[] records; + if (join.rightBinNames == null || join.rightBinNames.Length == 0) + { + records = Get(policy, keyArray); + } + else + { + records = Get(policy, keyArray, join.rightBinNames); + } + record.bins[join.leftKeysBinName] = records; + } + } + } + } +} diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index 
03b3f156..945d8cdd 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -1,229 +1,230 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -using System; -using System.Collections.Concurrent; - -namespace Aerospike.Client -{ - /// - /// Mutli-record transaction (MRT). Each command in the MRT must use the same namespace. - /// - public class Txn - { - public long Id { get; private set; } - public ConcurrentDictionary Reads { get; private set; } - public HashSet Writes { get; private set; } - public string Ns { get; private set; } - public int Deadline { get; set; } - - private bool monitorInDoubt; - - private bool rollAttempted; - - /// - /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with default capacities. - /// - public Txn() - { - Id = CreateId(); - Reads = new ConcurrentDictionary(); - Writes = new HashSet(); - } - - /// - /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with given capacities. - /// - /// expected number of record reads in the MRT. Minimum value is 16. - /// expected number of record writes in the MRT. Minimum value is 16. 
- public Txn(int readsCapacity, int writesCapacity) - { - if (readsCapacity < 16) - { - readsCapacity = 16; - } - - if (writesCapacity < 16) - { - writesCapacity = 16; - } - - Id = CreateId(); - Reads = new ConcurrentDictionary(-1, readsCapacity); // TODO: concurrency level - Writes = new HashSet(writesCapacity); - } - - private static long CreateId() - { - // An id of zero is considered invalid. Create random numbers - // in a loop until non-zero is returned. - Random r = new(); - long id = r.NextInt64(); - - while (id == 0) - { - id = r.NextInt64(); - } - return id; - } - - /// - /// Process the results of a record read. For internal use only. - /// - /// - /// - internal void OnRead(Key key, long? version) - { - if (version.HasValue) - { - Reads.TryAdd(key, version.Value); - } - } - - /// - /// Get record version for a given key. - /// - /// - /// - public long? GetReadVersion(Key key) - { - if (Reads.ContainsKey(key)) - { - return Reads[key]; - } - else - { - return null; - } - } - - /// - /// Process the results of a record write. For internal use only. - /// - /// - /// - /// - public void OnWrite(Key key, long? version, int resultCode) - { - // Write commands set namespace prior to sending the command, so there is - // no need to call it here when receiving the response. - if (version.HasValue) - { - Reads.TryAdd(key, version.Value); - } - else - { - if (resultCode == ResultCode.OK) - { - Reads.Remove(key, out _); - Writes.Add(key); - } - } - } - - /// - /// Add key to write hash when write command is in doubt (usually caused by timeout). - /// - public void OnWriteInDoubt(Key key) - { - Reads.Remove(key, out _); - Writes.Add(key); - } - - /// - /// Set MRT namespace only if doesn't already exist. - /// If namespace already exists, verify new namespace is the same. 
- /// - public void SetNamespace(string ns) - { - if (Ns == null) - { - Ns = ns; - } - else if (!Ns.Equals(ns)) { - throw new AerospikeException("Namespace must be the same for all commands in the MRT. orig: " + - Ns + " new: " + ns); - } - } - - /// - /// Set MRT namespaces for each key only if doesn't already exist. - /// If namespace already exists, verify new namespace is the same. - /// - public void SetNamespace(Key[] keys) - { - foreach (Key key in keys) - { - SetNamespace(key.ns); - } - } - - /// - /// Set MRT namespaces for each key only if doesn't already exist. - /// If namespace already exists, verify new namespace is the same. - /// - public void SetNamespace(List records) - { - foreach (BatchRead br in records) - { - SetNamespace(br.key.ns); - } - } - - /// - /// Set that the MRT monitor existence is in doubt. - /// - public void SetMonitorInDoubt() - { - this.monitorInDoubt = true; - } - - /// - /// Does MRT monitor record exist or is in doubt. - /// - public bool MonitorMightExist() - { - return Deadline != 0 || monitorInDoubt; - } - - /// - /// Does MRT monitor record exist. - /// - public bool MonitorExists() - { - return Deadline != 0; - } - - public bool SetRollAttempted() - { - if (rollAttempted) - { - return false; - } - rollAttempted = true; - return true; - } - - public void Clear() - { - Ns = null; - Deadline = 0; - Reads.Clear(); - Writes.Clear(); - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +using System; +using System.Collections.Concurrent; + +namespace Aerospike.Client +{ + /// + /// Mutli-record transaction (MRT). Each command in the MRT must use the same namespace. + /// + public class Txn + { + public long Id { get; private set; } + public ConcurrentDictionary Reads { get; private set; } + public HashSet Writes { get; private set; } + public string Ns { get; private set; } + public int Deadline { get; set; } + + private bool monitorInDoubt; + + private bool rollAttempted; + + /// + /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with default capacities. + /// + public Txn() + { + Id = CreateId(); + Reads = new ConcurrentDictionary(); + Writes = new HashSet(); + Deadline = 0; + } + + /// + /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with given capacities. + /// + /// expected number of record reads in the MRT. Minimum value is 16. + /// expected number of record writes in the MRT. Minimum value is 16. + public Txn(int readsCapacity, int writesCapacity) + { + if (readsCapacity < 16) + { + readsCapacity = 16; + } + + if (writesCapacity < 16) + { + writesCapacity = 16; + } + + Id = CreateId(); + Reads = new ConcurrentDictionary(100, readsCapacity); // TODO: concurrency level + Writes = new HashSet(writesCapacity); + } + + private static long CreateId() + { + // An id of zero is considered invalid. Create random numbers + // in a loop until non-zero is returned. 
+ Random r = new(); + long id = r.NextInt64(); + + while (id == 0) + { + id = r.NextInt64(); + } + return id; + } + + /// + /// Process the results of a record read. For internal use only. + /// + /// + /// + internal void OnRead(Key key, long? version) + { + if (version.HasValue) + { + Reads.TryAdd(key, version.Value); + } + } + + /// + /// Get record version for a given key. + /// + /// + /// + public long? GetReadVersion(Key key) + { + if (Reads.ContainsKey(key)) + { + return Reads[key]; + } + else + { + return null; + } + } + + /// + /// Process the results of a record write. For internal use only. + /// + /// + /// + /// + public void OnWrite(Key key, long? version, int resultCode) + { + // Write commands set namespace prior to sending the command, so there is + // no need to call it here when receiving the response. + if (version.HasValue) + { + Reads.TryAdd(key, version.Value); + } + else + { + if (resultCode == ResultCode.OK) + { + Reads.Remove(key, out _); + Writes.Add(key); + } + } + } + + /// + /// Add key to write hash when write command is in doubt (usually caused by timeout). + /// + public void OnWriteInDoubt(Key key) + { + Reads.Remove(key, out _); + Writes.Add(key); + } + + /// + /// Set MRT namespace only if doesn't already exist. + /// If namespace already exists, verify new namespace is the same. + /// + public void SetNamespace(string ns) + { + if (Ns == null) + { + Ns = ns; + } + else if (!Ns.Equals(ns)) { + throw new AerospikeException("Namespace must be the same for all commands in the MRT. orig: " + + Ns + " new: " + ns); + } + } + + /// + /// Set MRT namespaces for each key only if doesn't already exist. + /// If namespace already exists, verify new namespace is the same. + /// + public void SetNamespace(Key[] keys) + { + foreach (Key key in keys) + { + SetNamespace(key.ns); + } + } + + /// + /// Set MRT namespaces for each key only if doesn't already exist. + /// If namespace already exists, verify new namespace is the same. 
+ /// + public void SetNamespace(List records) + { + foreach (BatchRead br in records) + { + SetNamespace(br.key.ns); + } + } + + /// + /// Set that the MRT monitor existence is in doubt. + /// + public void SetMonitorInDoubt() + { + this.monitorInDoubt = true; + } + + /// + /// Does MRT monitor record exist or is in doubt. + /// + public bool MonitorMightExist() + { + return Deadline != 0 || monitorInDoubt; + } + + /// + /// Does MRT monitor record exist. + /// + public bool MonitorExists() + { + return Deadline != 0; + } + + public bool SetRollAttempted() + { + if (rollAttempted) + { + return false; + } + rollAttempted = true; + return true; + } + + public void Clear() + { + Ns = null; + Deadline = 0; + Reads.Clear(); + Writes.Clear(); + } + } +} diff --git a/AerospikeTest/Args.cs b/AerospikeTest/Args.cs index e534f3e2..f7bdac28 100644 --- a/AerospikeTest/Args.cs +++ b/AerospikeTest/Args.cs @@ -1,384 +1,474 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using Aerospike.Client; -using Aerospike.Client.Proxy; -using Microsoft.Extensions.Configuration; -using Microsoft.VisualStudio.TestTools.UnitTesting; - -namespace Aerospike.Test -{ - [TestClass] - public class Args - { - public static Args Instance = new Args(); - - public IAerospikeClient client; - public AerospikeClient nativeClient; - public IAsyncClient asyncClient; - public AsyncClient nativeAsync; - public AsyncClientProxy asyncProxy; - public AerospikeClientProxy proxyClient; - public Host[] hosts; - public Host proxyHost; - public int port; - public int proxyPort; - public bool testProxy; - public string user; - public string password; - public int timeout; - public string clusterName; - public string ns; - public string set; - public bool useServicesAlternate; - public string tlsName; - public string proxyTlsName; - public TlsPolicy tlsPolicy; - public TlsPolicy proxyTlsPolicy; - public AuthMode authMode; - public bool singleBin; - public bool enterprise; - public int proxyTotalTimeout; - public int proxySocketTimeout; - - public Args() - { - Log.Disable(); - - var builder = new ConfigurationBuilder().AddJsonFile("settings.json", optional: true, reloadOnChange: true); - IConfigurationRoot section = builder.Build(); - - port = int.Parse(section.GetSection("Port").Value); - proxyPort = int.Parse(section.GetSection("ProxyPort").Value); - testProxy = bool.Parse(section.GetSection("TestProxy").Value); - clusterName = section.GetSection("ClusterName").Value; - user = section.GetSection("User").Value; - password = section.GetSection("Password").Value; - timeout = int.Parse(section.GetSection("Timeout").Value); - ns = section.GetSection("Namespace").Value; - set = section.GetSection("Set").Value; - authMode = (AuthMode)Enum.Parse(typeof(AuthMode), section.GetSection("AuthMode").Value, true); - useServicesAlternate = bool.Parse(section.GetSection("UseServicesAlternate").Value); - - bool tlsEnable = bool.Parse(section.GetSection("TlsEnable").Value); - - if 
(tlsEnable) - { - tlsName = section.GetSection("TlsName").Value; - tlsPolicy = new TlsPolicy( - section.GetSection("TlsProtocols").Value, - section.GetSection("TlsRevoke").Value, - section.GetSection("TlsClientCertFile").Value, - bool.Parse(section.GetSection("TlsLoginOnly").Value) - ); - } - - bool tlsEnableProxy = bool.Parse(section.GetSection("ProxyTlsEnable").Value); - - if (tlsEnableProxy) - { - proxyTlsName = section.GetSection("ProxyTlsName").Value; - proxyTlsPolicy = new TlsPolicy( - section.GetSection("ProxyTlsProtocols").Value, - section.GetSection("ProxyTlsRevoke").Value, - section.GetSection("ProxyTlsClientCertFile").Value, - bool.Parse(section.GetSection("ProxyTlsLoginOnly").Value) - ); - } - - var hostName = section.GetSection("Host").Value; - if (hostName == null || hostName == String.Empty) - { - hosts = null; - } - else - { - hosts = Host.ParseHosts(hostName, tlsName, port); - } - - proxyHost = Host.ParseHosts(section.GetSection("ProxyHost").Value, proxyTlsName, proxyPort)[0]; - } - - public void Connect() - { - if (testProxy) - { - ConnectProxy(); - } - else - { - ConnectSync(); - ConnectAsync(); - } - } - - private void ConnectSync() - { - ClientPolicy policy = new ClientPolicy(); - policy.clusterName = clusterName; - policy.tlsPolicy = tlsPolicy; - policy.authMode = authMode; - policy.timeout = timeout; - policy.useServicesAlternate = useServicesAlternate; - - if (user != null && user.Length > 0) - { - policy.user = user; - policy.password = password; - } - - nativeClient = new AerospikeClient(policy, hosts); - - nativeClient.readPolicyDefault.totalTimeout = timeout; - nativeClient.WritePolicyDefault.totalTimeout = timeout; - nativeClient.ScanPolicyDefault.totalTimeout = timeout; - nativeClient.QueryPolicyDefault.totalTimeout = timeout; - nativeClient.BatchPolicyDefault.totalTimeout = timeout; - nativeClient.BatchParentPolicyWriteDefault.totalTimeout = timeout; - nativeClient.InfoPolicyDefault.timeout = timeout; - client = nativeClient; - - 
//Example of how to enable metrics - //client.EnableMetrics(new MetricsPolicy()); - - try - { - SetServerSpecific(); - } - catch - { - client.Close(); - client = null; - throw; - } - } - - private void ConnectProxy() - { - ClientPolicy policy = new ClientPolicy(); - ClientPolicy proxyPolicy = new ClientPolicy(); - AsyncClientPolicy asyncPolicy = new AsyncClientPolicy(); - AsyncClientPolicy proxyAsyncPolicy = new AsyncClientPolicy(); - policy.clusterName = clusterName; - proxyPolicy.clusterName = clusterName; - asyncPolicy.clusterName = clusterName; - proxyAsyncPolicy.clusterName = clusterName; - policy.tlsPolicy = tlsPolicy; - proxyPolicy.tlsPolicy = proxyTlsPolicy; - asyncPolicy.tlsPolicy = tlsPolicy; - proxyAsyncPolicy.tlsPolicy = proxyTlsPolicy; - policy.authMode = authMode; - proxyPolicy.authMode = authMode; - asyncPolicy.authMode = authMode; - proxyAsyncPolicy.authMode = authMode; - proxyPolicy.minConnsPerNode = 100; - proxyAsyncPolicy.minConnsPerNode = 100; - proxyPolicy.maxConnsPerNode = 100; - proxyAsyncPolicy.maxConnsPerNode = 100; - proxyPolicy.timeout = timeout; - proxyAsyncPolicy.timeout = timeout; - policy.useServicesAlternate = useServicesAlternate; - proxyPolicy.useServicesAlternate = useServicesAlternate; - asyncPolicy.useServicesAlternate = useServicesAlternate; - proxyAsyncPolicy.useServicesAlternate = useServicesAlternate; - - - if (user != null && user.Length > 0) - { - policy.user = user; - policy.password = password; - proxyPolicy.user = user; - proxyPolicy.password = password; - asyncPolicy.user = user; - asyncPolicy.password = password; - proxyAsyncPolicy.user = user; - proxyAsyncPolicy.password = password; - } - - asyncPolicy.asyncMaxCommands = 300; - proxyAsyncPolicy.asyncMaxCommands = 300; - - proxyClient = new AerospikeClientProxy(proxyPolicy, proxyHost); - if (hosts != null) - { - nativeClient = new AerospikeClient(policy, hosts); - nativeAsync = new AsyncClient(asyncPolicy, hosts); - } - else - { - nativeClient = null; - nativeAsync = 
null; - } - - asyncProxy = new AsyncClientProxy(proxyAsyncPolicy, proxyHost); - asyncClient = asyncProxy; - - proxyTotalTimeout = timeout; - proxySocketTimeout = 5000; - - proxyClient.readPolicyDefault.totalTimeout = proxyTotalTimeout; - proxyClient.readPolicyDefault.socketTimeout = proxySocketTimeout; - proxyClient.WritePolicyDefault.totalTimeout = proxyTotalTimeout; - proxyClient.WritePolicyDefault.socketTimeout = proxySocketTimeout; - proxyClient.ScanPolicyDefault.totalTimeout = proxyTotalTimeout; - proxyClient.ScanPolicyDefault.socketTimeout = proxySocketTimeout; - proxyClient.QueryPolicyDefault.totalTimeout = proxyTotalTimeout; - proxyClient.QueryPolicyDefault.socketTimeout = proxySocketTimeout; - proxyClient.BatchPolicyDefault.totalTimeout = proxyTotalTimeout; - proxyClient.BatchPolicyDefault.socketTimeout = proxySocketTimeout; - proxyClient.BatchParentPolicyWriteDefault.totalTimeout = proxyTotalTimeout; - proxyClient.BatchParentPolicyWriteDefault.socketTimeout = proxySocketTimeout; - proxyClient.InfoPolicyDefault.timeout = proxyTotalTimeout; - - asyncProxy.ReadPolicyDefault = proxyClient.ReadPolicyDefault; - asyncProxy.WritePolicyDefault = proxyClient.WritePolicyDefault; - asyncProxy.ScanPolicyDefault = proxyClient.ScanPolicyDefault; - asyncProxy.QueryPolicyDefault = proxyClient.QueryPolicyDefault; - asyncProxy.BatchPolicyDefault = proxyClient.BatchPolicyDefault; - asyncProxy.BatchParentPolicyWriteDefault = proxyClient.BatchParentPolicyWriteDefault; - asyncProxy.InfoPolicyDefault = proxyClient.InfoPolicyDefault; - - if (nativeClient != null) - { - nativeClient.ReadPolicyDefault = proxyClient.ReadPolicyDefault; - nativeClient.WritePolicyDefault = proxyClient.WritePolicyDefault; - nativeClient.ScanPolicyDefault = proxyClient.ScanPolicyDefault; - nativeClient.QueryPolicyDefault = proxyClient.QueryPolicyDefault; - nativeClient.BatchPolicyDefault = proxyClient.BatchPolicyDefault; - nativeClient.BatchParentPolicyWriteDefault = 
proxyClient.BatchParentPolicyWriteDefault; - nativeClient.InfoPolicyDefault = proxyClient.InfoPolicyDefault; - - asyncClient.ReadPolicyDefault = proxyClient.ReadPolicyDefault; - asyncClient.WritePolicyDefault = proxyClient.WritePolicyDefault; - asyncClient.ScanPolicyDefault = proxyClient.ScanPolicyDefault; - asyncClient.QueryPolicyDefault = proxyClient.QueryPolicyDefault; - asyncClient.BatchPolicyDefault = proxyClient.BatchPolicyDefault; - asyncClient.BatchParentPolicyWriteDefault = proxyClient.BatchParentPolicyWriteDefault; - asyncClient.InfoPolicyDefault = proxyClient.InfoPolicyDefault; - } - - client = proxyClient; - - try - { - if (nativeClient != null) - { - SetServerSpecific(); - } - } - catch - { - client.Close(); - client = null; - throw; - } - } - - private void ConnectAsync() - { - AsyncClientPolicy policy = new AsyncClientPolicy(); - policy.clusterName = clusterName; - policy.tlsPolicy = tlsPolicy; - policy.authMode = authMode; - policy.asyncMaxCommands = 300; - policy.timeout = timeout; - policy.useServicesAlternate = useServicesAlternate; - - if (user != null && user.Length > 0) - { - policy.user = user; - policy.password = password; - } - - nativeAsync = new AsyncClient(policy, hosts); - - nativeAsync.readPolicyDefault.totalTimeout = timeout; - nativeAsync.WritePolicyDefault.totalTimeout = timeout; - nativeAsync.ScanPolicyDefault.totalTimeout = timeout; - nativeAsync.QueryPolicyDefault.totalTimeout = timeout; - nativeAsync.BatchPolicyDefault.totalTimeout = timeout; - nativeAsync.BatchParentPolicyWriteDefault.totalTimeout = timeout; - nativeAsync.InfoPolicyDefault.timeout = timeout; - - asyncClient = nativeAsync; - - // Example of how to enable metrics - //asyncClient.EnableMetrics(new MetricsPolicy()); - } - - private void SetServerSpecific() - { - Node node = nativeClient.Nodes[0]; - string namespaceFilter = "namespace/" + ns; - Dictionary map = Info.Request(null, node, "edition", namespaceFilter); - - string edition = map["edition"]; - enterprise = 
edition.Equals("Aerospike Enterprise Edition"); - - string namespaceTokens = map[namespaceFilter]; - - if (namespaceTokens == null) - { - throw new Exception(string.Format("Failed to get namespace info: host={0} namespace={1}", node, ns)); - } - - singleBin = ParseBoolean(namespaceTokens, "single-bin"); - } - - private static bool ParseBoolean(String namespaceTokens, String name) - { - string search = name + '='; - int begin = namespaceTokens.IndexOf(search); - - if (begin < 0) - { - return false; - } - - begin += search.Length; - int end = namespaceTokens.IndexOf(';', begin); - - if (end < 0) - { - end = namespaceTokens.Length; - } - - string value = namespaceTokens.Substring(begin, end - begin); - return Convert.ToBoolean(value); - } - - public string GetBinName(string name) - { - // Single bin servers don't need a bin name. - return singleBin ? "" : name; - } - - public void Close() - { - if (client != null) - { - client.Close(); - client = null; - } - - if (asyncClient != null) - { - asyncClient.Close(); - asyncClient = null; - } - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using Aerospike.Client; +using Aerospike.Client.Proxy; +using Microsoft.Extensions.Configuration; +using Microsoft.VisualStudio.TestTools.UnitTesting; + +namespace Aerospike.Test +{ + [TestClass] + public class Args + { + public static Args Instance = new Args(); + + public IAerospikeClient client; + public AerospikeClient nativeClient; + public IAsyncClient asyncClient; + public AsyncClient nativeAsync; + public AsyncClientProxy asyncProxy; + public AerospikeClientProxy proxyClient; + public Host[] hosts; + public Host proxyHost; + public int port; + public int proxyPort; + public bool testProxy; + public string user; + public string password; + public int timeout; + public string clusterName; + public string ns; + public string set; + public bool useServicesAlternate; + public string tlsName; + public string proxyTlsName; + public TlsPolicy tlsPolicy; + public TlsPolicy proxyTlsPolicy; + public AuthMode authMode; + public bool singleBin; + public bool enterprise; + public int proxyTotalTimeout; + public int proxySocketTimeout; + + public Args() + { + Log.Disable(); + + var builder = new ConfigurationBuilder().AddJsonFile("settings.json", optional: true, reloadOnChange: true); + IConfigurationRoot section = builder.Build(); + + port = int.Parse(section.GetSection("Port").Value); + proxyPort = int.Parse(section.GetSection("ProxyPort").Value); + testProxy = bool.Parse(section.GetSection("TestProxy").Value); + clusterName = section.GetSection("ClusterName").Value; + user = section.GetSection("User").Value; + password = section.GetSection("Password").Value; + timeout = int.Parse(section.GetSection("Timeout").Value); + ns = section.GetSection("Namespace").Value; + set = section.GetSection("Set").Value; + authMode = (AuthMode)Enum.Parse(typeof(AuthMode), section.GetSection("AuthMode").Value, true); + useServicesAlternate = bool.Parse(section.GetSection("UseServicesAlternate").Value); + + bool tlsEnable = bool.Parse(section.GetSection("TlsEnable").Value); + + if 
(tlsEnable) + { + tlsName = section.GetSection("TlsName").Value; + tlsPolicy = new TlsPolicy( + section.GetSection("TlsProtocols").Value, + section.GetSection("TlsRevoke").Value, + section.GetSection("TlsClientCertFile").Value, + bool.Parse(section.GetSection("TlsLoginOnly").Value) + ); + } + + bool tlsEnableProxy = bool.Parse(section.GetSection("ProxyTlsEnable").Value); + + if (tlsEnableProxy) + { + proxyTlsName = section.GetSection("ProxyTlsName").Value; + proxyTlsPolicy = new TlsPolicy( + section.GetSection("ProxyTlsProtocols").Value, + section.GetSection("ProxyTlsRevoke").Value, + section.GetSection("ProxyTlsClientCertFile").Value, + bool.Parse(section.GetSection("ProxyTlsLoginOnly").Value) + ); + } + + var hostName = section.GetSection("Host").Value; + if (hostName == null || hostName == String.Empty) + { + hosts = null; + } + else + { + hosts = Host.ParseHosts(hostName, tlsName, port); + } + + proxyHost = Host.ParseHosts(section.GetSection("ProxyHost").Value, proxyTlsName, proxyPort)[0]; + } + + public void Connect() + { + if (testProxy) + { + ConnectProxy(); + } + else + { + ConnectSync(); + ConnectAsync(); + } + } + + private void ConnectSync() + { + ClientPolicy policy = new ClientPolicy(); + policy.clusterName = clusterName; + policy.tlsPolicy = tlsPolicy; + policy.authMode = authMode; + policy.timeout = timeout; + policy.useServicesAlternate = useServicesAlternate; + + if (user != null && user.Length > 0) + { + policy.user = user; + policy.password = password; + } + + nativeClient = new AerospikeClient(policy, hosts); + + nativeClient.ReadPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeClient.WritePolicyDefault = new() + { + totalTimeout = timeout + }; + nativeClient.ScanPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeClient.QueryPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeClient.BatchPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeClient.BatchParentPolicyWriteDefault = new() + { + 
totalTimeout = timeout + }; + nativeClient.TxnVerifyPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeClient.TxnRollPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeClient.InfoPolicyDefault = new() + { + timeout = timeout + }; + + client = nativeClient; + + //Example of how to enable metrics + //client.EnableMetrics(new MetricsPolicy()); + + try + { + SetServerSpecific(); + } + catch + { + client.Close(); + client = null; + throw; + } + } + + private void ConnectProxy() + { + ClientPolicy policy = new ClientPolicy(); + ClientPolicy proxyPolicy = new ClientPolicy(); + AsyncClientPolicy asyncPolicy = new AsyncClientPolicy(); + AsyncClientPolicy proxyAsyncPolicy = new AsyncClientPolicy(); + policy.clusterName = clusterName; + proxyPolicy.clusterName = clusterName; + asyncPolicy.clusterName = clusterName; + proxyAsyncPolicy.clusterName = clusterName; + policy.tlsPolicy = tlsPolicy; + proxyPolicy.tlsPolicy = proxyTlsPolicy; + asyncPolicy.tlsPolicy = tlsPolicy; + proxyAsyncPolicy.tlsPolicy = proxyTlsPolicy; + policy.authMode = authMode; + proxyPolicy.authMode = authMode; + asyncPolicy.authMode = authMode; + proxyAsyncPolicy.authMode = authMode; + proxyPolicy.minConnsPerNode = 100; + proxyAsyncPolicy.minConnsPerNode = 100; + proxyPolicy.maxConnsPerNode = 100; + proxyAsyncPolicy.maxConnsPerNode = 100; + proxyPolicy.timeout = timeout; + proxyAsyncPolicy.timeout = timeout; + policy.useServicesAlternate = useServicesAlternate; + proxyPolicy.useServicesAlternate = useServicesAlternate; + asyncPolicy.useServicesAlternate = useServicesAlternate; + proxyAsyncPolicy.useServicesAlternate = useServicesAlternate; + + + if (user != null && user.Length > 0) + { + policy.user = user; + policy.password = password; + proxyPolicy.user = user; + proxyPolicy.password = password; + asyncPolicy.user = user; + asyncPolicy.password = password; + proxyAsyncPolicy.user = user; + proxyAsyncPolicy.password = password; + } + + asyncPolicy.asyncMaxCommands = 300; + 
proxyAsyncPolicy.asyncMaxCommands = 300; + + proxyClient = new AerospikeClientProxy(proxyPolicy, proxyHost); + if (hosts != null) + { + nativeClient = new AerospikeClient(policy, hosts); + nativeAsync = new AsyncClient(asyncPolicy, hosts); + } + else + { + nativeClient = null; + nativeAsync = null; + } + + asyncProxy = new AsyncClientProxy(proxyAsyncPolicy, proxyHost); + asyncClient = asyncProxy; + + proxyTotalTimeout = timeout; + proxySocketTimeout = 5000; + + proxyClient.ReadPolicyDefault = new() + { + totalTimeout = proxyTotalTimeout, + socketTimeout = proxySocketTimeout + }; + proxyClient.WritePolicyDefault = new() + { + totalTimeout = proxyTotalTimeout, + socketTimeout = proxySocketTimeout + }; + proxyClient.ScanPolicyDefault = new() + { + totalTimeout = proxyTotalTimeout, + socketTimeout = proxySocketTimeout + }; + proxyClient.QueryPolicyDefault = new() + { + totalTimeout = proxyTotalTimeout, + socketTimeout = proxySocketTimeout + }; + proxyClient.BatchPolicyDefault = new() + { + totalTimeout = proxyTotalTimeout, + socketTimeout = proxySocketTimeout + }; + proxyClient.BatchParentPolicyWriteDefault = new() + { + totalTimeout = proxyTotalTimeout, + socketTimeout = proxySocketTimeout + }; + proxyClient.TxnVerifyPolicyDefault = new() + { + totalTimeout = proxyTotalTimeout, + socketTimeout = proxySocketTimeout + }; + proxyClient.TxnRollPolicyDefault = new() + { + totalTimeout = proxyTotalTimeout, + socketTimeout = proxySocketTimeout + }; + proxyClient.InfoPolicyDefault = new() + { + timeout = proxyTotalTimeout + }; + + asyncProxy.ReadPolicyDefault = proxyClient.ReadPolicyDefault; + asyncProxy.WritePolicyDefault = proxyClient.WritePolicyDefault; + asyncProxy.ScanPolicyDefault = proxyClient.ScanPolicyDefault; + asyncProxy.QueryPolicyDefault = proxyClient.QueryPolicyDefault; + asyncProxy.BatchPolicyDefault = proxyClient.BatchPolicyDefault; + asyncProxy.BatchParentPolicyWriteDefault = proxyClient.BatchParentPolicyWriteDefault; + asyncProxy.InfoPolicyDefault = 
proxyClient.InfoPolicyDefault; + + if (nativeClient != null) + { + nativeClient.ReadPolicyDefault = proxyClient.ReadPolicyDefault; + nativeClient.WritePolicyDefault = proxyClient.WritePolicyDefault; + nativeClient.ScanPolicyDefault = proxyClient.ScanPolicyDefault; + nativeClient.QueryPolicyDefault = proxyClient.QueryPolicyDefault; + nativeClient.BatchPolicyDefault = proxyClient.BatchPolicyDefault; + nativeClient.BatchParentPolicyWriteDefault = proxyClient.BatchParentPolicyWriteDefault; + nativeClient.InfoPolicyDefault = proxyClient.InfoPolicyDefault; + + asyncClient.ReadPolicyDefault = proxyClient.ReadPolicyDefault; + asyncClient.WritePolicyDefault = proxyClient.WritePolicyDefault; + asyncClient.ScanPolicyDefault = proxyClient.ScanPolicyDefault; + asyncClient.QueryPolicyDefault = proxyClient.QueryPolicyDefault; + asyncClient.BatchPolicyDefault = proxyClient.BatchPolicyDefault; + asyncClient.BatchParentPolicyWriteDefault = proxyClient.BatchParentPolicyWriteDefault; + asyncClient.InfoPolicyDefault = proxyClient.InfoPolicyDefault; + } + + client = proxyClient; + + try + { + if (nativeClient != null) + { + SetServerSpecific(); + } + } + catch + { + client.Close(); + client = null; + throw; + } + } + + private void ConnectAsync() + { + AsyncClientPolicy policy = new AsyncClientPolicy(); + policy.clusterName = clusterName; + policy.tlsPolicy = tlsPolicy; + policy.authMode = authMode; + policy.asyncMaxCommands = 300; + policy.timeout = timeout; + policy.useServicesAlternate = useServicesAlternate; + + if (user != null && user.Length > 0) + { + policy.user = user; + policy.password = password; + } + + nativeAsync = new AsyncClient(policy, hosts); + + nativeAsync.ReadPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeAsync.WritePolicyDefault = new() + { + totalTimeout = timeout + }; + nativeAsync.ScanPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeAsync.QueryPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeAsync.BatchPolicyDefault 
= new() + { + totalTimeout = timeout + }; + nativeAsync.BatchParentPolicyWriteDefault = new() + { + totalTimeout = timeout + }; + nativeAsync.TxnVerifyPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeAsync.TxnRollPolicyDefault = new() + { + totalTimeout = timeout + }; + nativeAsync.InfoPolicyDefault = new() + { + timeout = timeout + }; + + asyncClient = nativeAsync; + + // Example of how to enable metrics + //asyncClient.EnableMetrics(new MetricsPolicy()); + } + + private void SetServerSpecific() + { + Node node = nativeClient.Nodes[0]; + string namespaceFilter = "namespace/" + ns; + Dictionary map = Info.Request(null, node, "edition", namespaceFilter); + + string edition = map["edition"]; + enterprise = edition.Equals("Aerospike Enterprise Edition"); + + string namespaceTokens = map[namespaceFilter]; + + if (namespaceTokens == null) + { + throw new Exception(string.Format("Failed to get namespace info: host={0} namespace={1}", node, ns)); + } + + singleBin = ParseBoolean(namespaceTokens, "single-bin"); + } + + private static bool ParseBoolean(String namespaceTokens, String name) + { + string search = name + '='; + int begin = namespaceTokens.IndexOf(search); + + if (begin < 0) + { + return false; + } + + begin += search.Length; + int end = namespaceTokens.IndexOf(';', begin); + + if (end < 0) + { + end = namespaceTokens.Length; + } + + string value = namespaceTokens.Substring(begin, end - begin); + return Convert.ToBoolean(value); + } + + public string GetBinName(string name) + { + // Single bin servers don't need a bin name. + return singleBin ? 
"" : name; + } + + public void Close() + { + if (client != null) + { + client.Close(); + client = null; + } + + if (asyncClient != null) + { + asyncClient.Close(); + asyncClient = null; + } + } + } +} diff --git a/AerospikeTest/Async/TestAsyncTxn.cs b/AerospikeTest/Async/TestAsyncTxn.cs index 47319d72..c52b7289 100644 --- a/AerospikeTest/Async/TestAsyncTxn.cs +++ b/AerospikeTest/Async/TestAsyncTxn.cs @@ -1,674 +1,998 @@ -/* - * Copyright 2012-2018 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Aerospike.Client; -using System.Reflection; -using System.Text; -using static Aerospike.Client.CommitStatus; -using static Aerospike.Client.AbortStatus; - -namespace Aerospike.Test -{ - [TestClass] - public class TestAsyncTxn : TestAsync - { - private static readonly string binName = "bin"; - - [ClassInitialize()] - public static void Prepare(TestContext testContext) - { - if (!args.testProxy || (args.testProxy && nativeClient != null)) - { - Assembly assembly = Assembly.GetExecutingAssembly(); - RegisterTask task = nativeClient.Register(null, assembly, "Aerospike.Test.LuaResources.record_example.lua", "record_example.lua", Language.LUA); - task.Wait(); - } - } - - [TestMethod] - public void AsyncTxnWrite() - { - Key key = new(args.ns, args.set, "asyncTxnWrite"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); - - client.Commit(new CommitHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val2"), key); - } - - [TestMethod] - public void AsyncTxnWriteTwice() - { - Key key = new(args.ns, args.set, "asyncTxnWriteTwice"); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, new PutHandler(this), key, new Bin(binName, "val1")); - client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); - - client.Commit(new CommitHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val2"), key); - } - - [TestMethod] - public void AsyncTxnWriteBlock() - { - Key key = new(args.ns, args.set, "asyncTxnWriteBlock"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); - - try - { 
- // This write should be blocked. - client.Put(null, new PutHandler(this), key, new Bin(binName, "val3")); - throw new AerospikeException("Unexpected success"); - } - catch (AerospikeException e) - { - if (e.Result != ResultCode.MRT_BLOCKED) - { - throw e; - } - } - - client.Commit(new CommitHandler(this), txn); - } - - [TestMethod] - public void AsyncTxnWriteRead() - { - Key key = new(args.ns, args.set, "asyncTxnWriteRead"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); - - client.Get(null, new GetExpectHandler(this, "val1"), key); - - client.Commit(new CommitHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val2"), key); - } - - [TestMethod] - public void AsyncTxnWriteAbort() - { - Key key = new(args.ns, args.set, "asyncTxnWriteAbort"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, new PutHandler(this), key, new Bin(binName, "val2")); - - Policy p = client.ReadPolicyDefault; - p.Txn = txn; - client.Get(p, new GetExpectHandler(this, "val2"), key); - - client.Abort(new AbortHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val1"), key); - } - - [TestMethod] - public void AsyncTxnDelete() - { - Key key = new(args.ns, args.set, "asyncTxnDelete"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - wp.durableDelete = true; - client.Delete(wp, new DeleteHandler(this), key); - - client.Commit(new CommitHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, null), key); - } - - [TestMethod] - public void AsyncTxnDeleteAbort() - { - Key key = new(args.ns, args.set, "asyncTxnDeleteAbort"); - - 
client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - wp.durableDelete = true; - client.Delete(wp, new DeleteHandler(this), key); - - client.Abort(new AbortHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val1"), key); - } - - [TestMethod] - public void AsyncTxnDeleteTwice() - { - Key key = new(args.ns, args.set, "asyncTxnDeleteTwice"); - - Txn txn = new(); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - wp.durableDelete = true; - client.Delete(wp, new DeleteHandler(this), key); - client.Delete(wp, new DeleteHandler(this), key); - - client.Commit(new CommitHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, null), key); - } - - [TestMethod] - public void AsyncTxnTouch() - { - Key key = new(args.ns, args.set, "asyncTxnTouch"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Touch(wp, new TouchHandler(this), key); - - client.Commit(new CommitHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val1"), key); - } - - [TestMethod] - public void AsyncTxnTouchAbort() - { - Key key = new(args.ns, args.set, "asyncTxnTouchAbort"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Touch(wp, new TouchHandler(this), key); - - client.Abort(new AbortHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val1"), key); - } - - [TestMethod] - public void AsyncTxnOperateWrite() - { - Key key = new(args.ns, args.set, "asyncTxnOperateWrite3"); - Bin bin2 = new("bin2", "bal1"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1"), bin2); - - Txn txn = new(); - - 
WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Operate(wp, new OperateExpectHandler(this, bin2), key, - Operation.Put(new Bin(binName, "val2")), - Operation.Get("bin2") - ); - - client.Commit(new CommitHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val2"), key); - } - - [TestMethod] - public void AsyncTxnOperateWriteAbort() - { - Key key = new(args.ns, args.set, "asyncTxnOperateWriteAbort"); - Bin bin2 = new("bin2", "bal1"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1"), bin2); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Operate(wp, new OperateExpectHandler(this, bin2), key, - Operation.Put(new Bin(binName, "val2")), - Operation.Get(bin2.name) - ); - - client.Abort(new AbortHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val1"), key); - } - - [TestMethod] - public void AsyncTxnUDF() - { - Key key = new(args.ns, args.set, "asyncTxnUDF"); - Bin bin2 = new("bin2", "bal1"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1"), bin2); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Execute(wp, new UDFHandler(this), key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); - - client.Commit(new CommitHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val2"), key); - } - - [TestMethod] - public void AsyncTxnUDFAbort() - { - Key key = new(args.ns, args.set, "asyncTxnUDFAbort"); - Bin bin2 = new("bin2", "bal1"); - - client.Put(null, new PutHandler(this), key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Execute(wp, new UDFHandler(this), key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); - - client.Abort(new AbortHandler(this), txn); - - client.Get(null, new GetExpectHandler(this, "val1"), key); - } - - [TestMethod] - public void 
AsyncTxnBatch() - { - Key[] keys = new Key[10]; - Bin bin = new(binName, 1); - - for (int i = 0; i < keys.Length; i++) - { - Key key = new(args.ns, args.set, "asyncTxnBatch" + i); - keys[i] = key; - - client.Put(null, key, bin); - } - - client.Get(null, new BatchGetExpectHandler(this, 1), keys); - - Txn txn = new(); - - bin = new(binName, 2); - - BatchPolicy bp = BatchPolicy.WriteDefault(); - bp.Txn = txn; - - client.Operate(bp, null, new BatchOperateHandler(this), keys, Operation.Put(bin)); - - client.Commit(new CommitHandler(this), txn); - - client.Get(null, new BatchGetExpectHandler(this, 1), keys); - } - - [TestMethod] - public void AsyncTxnBatchAbort() - { - var keys = new Key[10]; - Bin bin = new(binName, 1); - - for (int i = 0; i < keys.Length; i++) - { - Key key = new(args.ns, args.set, "asyncTxnBatch" + i); - keys[i] = key; - - client.Put(null, key, bin); - } - - client.Get(null, new BatchGetExpectHandler(this, 1), keys); - - Txn txn = new(); - - bin = new Bin(binName, 2); - - BatchPolicy bp = BatchPolicy.WriteDefault(); - bp.Txn = txn; - - client.Operate(bp, null, new BatchOperateHandler(this), keys, Operation.Put(bin)); - - client.Abort(new AbortHandler(this), txn); - - client.Get(null, new BatchGetExpectHandler(this, 1), keys); - } - - private class CommitHandler : CommitListener - { - private readonly TestAsyncTxn parent; - - public CommitHandler(TestAsyncTxn parent) - { - this.parent = parent; - } - - public void OnSuccess(CommitStatusType status) - { - parent.NotifyCompleted(); - } - - public void OnFailure(AerospikeException.Commit e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } - } - - private class AbortHandler : AbortListener - { - private readonly TestAsyncTxn parent; - - public AbortHandler(TestAsyncTxn parent) - { - this.parent = parent; - } - - public void OnSuccess(AbortStatusType status) - { - parent.NotifyCompleted(); - } - - public void OnFailure(AerospikeException e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } 
- } - - private class PutHandler : WriteListener - { - private readonly TestAsyncTxn parent; - - public PutHandler(TestAsyncTxn parent) - { - this.parent = parent; - } - - public void OnSuccess(Key key) - { - parent.NotifyCompleted(); - } - - public void OnFailure(AerospikeException e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } - } - - private class GetExpectHandler : RecordListener - { - private readonly TestAsyncTxn parent; - private string expect; - - public GetExpectHandler(TestAsyncTxn parent, string expect) - { - this.parent = parent; - this.expect = expect; - } - - public void OnSuccess(Key key, Record record) - { - if (expect != null) - { - if (parent.AssertBinEqual(key, record, binName, expect)) - { - parent.NotifyCompleted(); - } - else - { - parent.NotifyCompleted(); - } - } - else - { - if (parent.AssertRecordNotFound(key, record)) - { - parent.NotifyCompleted(); - } - else - { - parent.NotifyCompleted(); - } - } - } - - public void OnFailure(AerospikeException e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } - } - - private class OperateExpectHandler : RecordListener - { - private readonly TestAsyncTxn parent; - private Bin? expect; - - public OperateExpectHandler(TestAsyncTxn parent, Bin? 
expect) - { - this.parent = parent; - this.expect = expect; - } - - public void OnSuccess(Key key, Record record) - { - if (expect != null) - { - if (parent.AssertBinEqual(key, record, expect?.name, expect?.value.Object)) - { - parent.NotifyCompleted(); - } - else - { - parent.NotifyCompleted(); - } - } - else - { - if (parent.AssertRecordNotFound(key, record)) - { - parent.NotifyCompleted(); - } - else - { - parent.NotifyCompleted(); - } - } - } - - public void OnFailure(AerospikeException e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } - } - - private class UDFHandler : ExecuteListener - { - private readonly TestAsyncTxn parent; - - public UDFHandler(TestAsyncTxn parent) - { - this.parent = parent; - } - - public void OnSuccess(Key key, Object obj) - { - parent.NotifyCompleted(); - } - - public void OnFailure(AerospikeException e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } - } - - private class BatchGetExpectHandler : RecordArrayListener - { - private readonly TestAsyncTxn parent; - private readonly int expected; - - public BatchGetExpectHandler(TestAsyncTxn parent, int expected) - { - this.parent = parent; - this.expected = expected; - } - - public void OnSuccess(Key[] keys, Record[] records) - { - if (parent.AssertBatchEqual(keys, records, binName, expected)) - { - parent.NotifyCompleted(); - } - else - { - parent.NotifyCompleted(); - } - } - - public void OnFailure(AerospikeException e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } - } - - private class BatchOperateHandler : BatchRecordArrayListener - { - private TestAsyncTxn parent; - - public BatchOperateHandler(TestAsyncTxn parent) - { - this.parent = parent; - } - - public void OnSuccess(BatchRecord[] records, bool status) - { - if (status) - { - parent.NotifyCompleted(); - } - else - { - StringBuilder sb = new StringBuilder(); - sb.Append("Batch failed:"); - sb.Append(System.Environment.NewLine); - - foreach (BatchRecord br in records) - { - if (br.resultCode == 0) 
- { - sb.Append("Record: " + br.record); - } - else - { - sb.Append("ResultCode: " + br.resultCode); - } - sb.Append(System.Environment.NewLine); - } - parent.SetError(new AerospikeException(sb.ToString())); - parent.NotifyCompleted(); - } - } - - public void OnFailure(BatchRecord[] records, AerospikeException e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } - } - - private class TouchHandler : WriteListener - { - private TestAsyncTxn parent; - - public TouchHandler(TestAsyncTxn parent) - { - this.parent = parent; - } - - public void OnSuccess(Key key) - { - parent.NotifyCompleted(); - } - - public void OnFailure(AerospikeException e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } - } - - private class DeleteHandler : DeleteListener - { - private TestAsyncTxn parent; - - public DeleteHandler(TestAsyncTxn parent) - { - this.parent = parent; - } - - public void OnSuccess(Key key, bool existed) - { - parent.NotifyCompleted(); - } - - public void OnFailure(AerospikeException e) - { - parent.SetError(e); - parent.NotifyCompleted(); - } - } - } -} +/* + * Copyright 2012-2018 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Aerospike.Client; +using System.Reflection; +using System.Text; +using static Aerospike.Client.CommitStatus; +using static Aerospike.Client.AbortStatus; + +namespace Aerospike.Test +{ + [TestClass] + public class TestAsyncTxn : TestAsync + { + private static readonly string binName = "bin"; + + [ClassInitialize()] + public static void Prepare(TestContext testContext) + { + if (!args.testProxy || (args.testProxy && nativeClient != null)) + { + Assembly assembly = Assembly.GetExecutingAssembly(); + RegisterTask task = nativeClient.Register(null, assembly, "Aerospike.Test.LuaResources.record_example.lua", "record_example.lua", Language.LUA); + task.Wait(); + } + } + + [TestMethod] + public void AsyncTxnWrite() + { + Key key = new(args.ns, args.set, "asyncTxnWrite"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteTwice() + { + Key key = new(args.ns, args.set, "asyncTxnWriteTwice"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(txn, key, "val1"), + new Put(txn, key, "val2"), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteBlock() + { + Key key = new(args.ns, args.set, "asyncTxnWriteBlock1111"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new Put(null, key, "val3", ResultCode.MRT_BLOCKED), // Should be blocked + new Commit(txn), + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteRead() + { + Key key = new(args.ns, args.set, "asyncTxnWriteRead"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new GetExpect(null, key, "val1"), + new Commit(txn), + new GetExpect(null, key, "val2") + 
}; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteAbort() + { + Key key = new(args.ns, args.set, "asyncTxnWriteAbort"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new GetExpect(txn, key, "val2"), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnDelete() + { + Key key = new(args.ns, args.set, "asyncTxnDelete"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Delete(txn, key), + new Commit(txn), + new GetExpect(null, key, null) + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnDeleteAbort() + { + Key key = new(args.ns, args.set, "asyncTxnDeleteAbort"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Delete(txn, key), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnDeleteTwice() + { + Key key = new(args.ns, args.set, "asyncTxnDeleteTwice"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Delete(txn, key), + new Delete(txn, key), + new Commit(txn), + new GetExpect(null, key, null) + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnTouch() + { + Key key = new(args.ns, args.set, "asyncTxnTouch"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Touch(txn, key), + new Commit(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnTouchAbort() + { + Key key = new(args.ns, args.set, "asyncTxnTouchAbort"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Touch(txn, key), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnOperateWrite() + { + Key key = new(args.ns, args.set, "asyncTxnOperateWrite3"); + Txn 
txn = new(); + Bin bin2 = new("bin2", "bal1"); + + var cmds = new Runner[] + { + new Put(null, key, new Bin(binName, "val1"), bin2), + new OperateExpect(txn, key, + bin2, + Operation.Put(new Bin(binName, "val2")), + Operation.Get(bin2.name) + ), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnOperateWriteAbort() + { + Key key = new(args.ns, args.set, "asyncTxnOperateWriteAbort"); + Txn txn = new(); + Bin bin2 = new("bin2", "bal1"); + + var cmds = new Runner[] + { + new Put(null, key, new Bin(binName, "val1"), bin2), + new OperateExpect(txn, key, + bin2, + Operation.Put(new Bin(binName, "val2")), + Operation.Get(bin2.name) + ), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnUDF() + { + Key key = new(args.ns, args.set, "asyncTxnUDF"); + Txn txn = new(); + Bin bin2 = new("bin2", "bal1"); + + var cmds = new Runner[] + { + new Put(null, key, new Bin(binName, "val1"), bin2), + new UDF(txn, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnUDFAbort() + { + Key key = new(args.ns, args.set, "asyncTxnUDFAbort"); + Txn txn = new(); + Bin bin2 = new("bin2", "bal1"); + + var cmds = new Runner[] + { + new Put(null, key, new Bin(binName, "val1"), bin2), + new UDF(txn, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnBatch() + { + Key[] keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, "asyncTxnBatch" + i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Txn txn = new(); + bin = new(binName, 2); + + var cmds = new Runner[] + { + new BatchGetExpect(null, keys, 1), 
+ new BatchOperate(txn, keys, Operation.Put(bin)), + new Commit(txn), + new BatchGetExpect(null, keys, 2), + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnBatchAbort() + { + var keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, "asyncTxnBatch" + i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Txn txn = new(); + bin = new Bin(binName, 2); + + var cmds = new Runner[] + { + new BatchGetExpect(null, keys, 1), + new BatchOperate(txn, keys, Operation.Put(bin)), + new Abort(txn), + new BatchGetExpect(null, keys, 1), + }; + + Execute(cmds); + } + + private void Execute(Runner[] cmdArray) + { + Cmds a = new(this, cmdArray); + a.RunNext(); + WaitTillComplete(); + } + + private void OnError(Exception e) + { + SetError(e); + NotifyCompleted(); + } + + private void OnError(Exception e, int expectedResult) + { + if (e is AerospikeException ae) + { + if (ae.Result == expectedResult) + { + NotifyCompleted(); + return; + } + } + + OnError(e); + } + + private void OnError() + { + // Error is located in monitor instance which is checked in waitTillComplete(); + NotifyCompleted(); + } + + private class Cmds : Listener + { + private readonly TestAsyncTxn parent; + readonly Runner[] cmds; + int idx; + + public Cmds(TestAsyncTxn parent, Runner[] cmds) + { + this.parent = parent; + this.cmds = cmds; + this.idx = -1; + } + + public void RunNext() + { + if (++idx == cmds.Length) + { + parent.NotifyCompleted(); + return; + } + + try + { + cmds[idx].Run(parent, this); + } + catch (Exception e) + { + parent.OnError(e); + } + } + + public void OnSuccess() + { + RunNext(); + } + + public void OnFailure() + { + parent.OnError(); + } + + public void OnFailure(Exception e) + { + parent.OnError(e); + } + + public void OnFailure(Exception e, int expectedResult) + { + parent.OnError(e, expectedResult); + } + } + + public class Commit : Runner + { + private readonly Txn txn; + + public 
Commit(Txn txn) + { + this.txn = txn; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + client.Commit(new CommitHandler(listener), txn); + } + + private class CommitHandler : CommitListener + { + private readonly Listener listener; + + public CommitHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(CommitStatusType status) + { + listener.OnSuccess(); + } + + public void OnFailure(AerospikeException.Commit e) + { + listener.OnFailure(e); + } + } + } + + public class Abort : Runner + { + private readonly Txn txn; + + public Abort(Txn txn) + { + this.txn = txn; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + client.Abort(new AbortHandler(listener), txn); + } + + private class AbortHandler : AbortListener + { + private readonly Listener listener; + + public AbortHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(AbortStatusType status) + { + listener.OnSuccess(); + } + } + } + + public class Put : Runner + { + private readonly Txn txn; + private readonly Key key; + private readonly Bin[] bins; + private readonly int expectedResult = 0; + + public Put(Txn txn, Key key, string val) + { + this.txn = txn; + this.key = key; + this.bins = new Bin[] { new(binName, val)}; + } + + public Put(Txn txn, Key key, string val, int expectedResult) + { + this.txn = txn; + this.key = key; + this.bins = new Bin[] { new(binName, val) }; + this.expectedResult = expectedResult; + } + + public Put(Txn txn, Key key, params Bin[] bins) + { + this.txn = txn; + this.key = key; + this.bins = bins; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + if (txn != null) + { + wp = client.WritePolicyDefault; + wp.Txn = txn; + } + client.Put(wp, new PutHandler(listener, expectedResult), key, bins); + } + + private class PutHandler : WriteListener + { + private readonly Listener listener; + private readonly int expectedResult; + + public 
PutHandler(Listener listener, int expectedResult) + { + this.listener = listener; + this.expectedResult = expectedResult; + } + + public void OnSuccess(Key key) + { + listener.OnSuccess(); + } + + public void OnFailure(AerospikeException e) + { + if (expectedResult != 0) + { + listener.OnFailure(e, expectedResult); + } + else + { + listener.OnFailure(e); + } + } + } + } + + public class GetExpect : Runner + { + private readonly Txn txn; + private readonly Key key; + private readonly string expect; + + public GetExpect(Txn txn, Key key, string expect) + { + this.txn = txn; + this.key = key; + this.expect = expect; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + Policy p = null; + + if (txn != null) + { + p = client.ReadPolicyDefault; + p.Txn = txn; + } + client.Get(p, new GetExpectHandler(parent, listener, expect), key); + } + + private class GetExpectHandler : RecordListener + { + private readonly TestAsyncTxn parent; + private readonly Listener listener; + private string expect; + + public GetExpectHandler(TestAsyncTxn parent, Listener listener, string expect) + { + this.parent = parent; + this.listener = listener; + this.expect = expect; + } + + public void OnSuccess(Key key, Record record) + { + if (expect != null) + { + if (parent.AssertBinEqual(key, record, binName, expect)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + else + { + if (parent.AssertRecordNotFound(key, record)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class OperateExpect : Runner + { + private readonly Txn txn; + private readonly Key key; + private readonly Operation[] ops; + private readonly Bin? expect; + + public OperateExpect(Txn txn, Key key, Bin? 
expect, params Operation[] ops) + { + this.txn = txn; + this.key = key; + this.expect = expect; + this.ops = ops; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + + if (txn != null) + { + wp = client.WritePolicyDefault; + wp.Txn = txn; + } + client.Operate(wp, new OperateExpectHandler(parent, listener, expect), key, ops); + } + + private class OperateExpectHandler : RecordListener + { + private readonly TestAsyncTxn parent; + + private readonly Listener listener; + private Bin? expect; + + public OperateExpectHandler(TestAsyncTxn parent, Listener listener, Bin? expect) + { + this.parent = parent; + this.listener = listener; + this.expect = expect; + } + + public void OnSuccess(Key key, Record record) + { + if (expect != null) + { + if (parent.AssertBinEqual(key, record, expect?.name, expect?.value.Object)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + else + { + if (parent.AssertRecordNotFound(key, record)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class UDF : Runner + { + private readonly Txn txn; + private readonly Key key; + private readonly string packageName; + private readonly string functionName; + private readonly Value[] functionArgs; + + public UDF( + Txn txn, + Key key, + string packageName, + string functionName, + params Value[] functionArgs + ) { + this.txn = txn; + this.key = key; + this.packageName = packageName; + this.functionName = functionName; + this.functionArgs = functionArgs; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + + if (txn != null) + { + wp = client.WritePolicyDefault; + wp.Txn = txn; + } + client.Execute(wp, new UDFHandler(listener), key, packageName, functionName, functionArgs); + } + + private class UDFHandler : ExecuteListener + { + private readonly Listener listener; + + 
public UDFHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(Key key, Object obj) + { + listener.OnSuccess(); + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class BatchGetExpect : Runner + { + private readonly Txn txn; + private readonly Key[] keys; + private readonly int expected; + + public BatchGetExpect(Txn txn, Key[] keys, int expected) + { + this.txn = txn; + this.keys = keys; + this.expected = expected; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + BatchPolicy bp = null; + + if (txn != null) { + bp = client.BatchPolicyDefault; + bp.Txn = txn; + } + client.Get(bp, new BatchGetExpectHandler(parent, listener, expected), keys); + } + + private class BatchGetExpectHandler : RecordArrayListener + { + private readonly TestAsyncTxn parent; + private readonly Listener listener; + private readonly int expected; + + public BatchGetExpectHandler(TestAsyncTxn parent, Listener listener, int expected) + { + this.parent = parent; + this.listener = listener; + this.expected = expected; + } + + public void OnSuccess(Key[] keys, Record[] records) + { + if (parent.AssertBatchEqual(keys, records, binName, expected)) + { + listener.OnSuccess(); + } + else + { + listener.OnFailure(); + } + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class BatchOperate : Runner + { + private readonly Txn txn; + private readonly Key[] keys; + private readonly Operation[] ops; + + public BatchOperate(Txn txn, Key[] keys, params Operation[] ops) + { + this.txn = txn; + this.keys = keys; + this.ops = ops; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + BatchPolicy bp = null; + + if (txn != null) + { + bp = client.BatchParentPolicyWriteDefault; + bp.Txn = txn; + } + client.Operate(bp, null, new BatchOperateHandler(listener), keys, ops); + } + + private class BatchOperateHandler : BatchRecordArrayListener + { 
+ private Listener listener; + + public BatchOperateHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(BatchRecord[] records, bool status) + { + if (status) + { + listener.OnSuccess(); + } + else + { + StringBuilder sb = new StringBuilder(); + sb.Append("Batch failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in records) + { + if (br.resultCode == 0) + { + sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + listener.OnFailure(new AerospikeException(sb.ToString())); + } + } + + public void OnFailure(BatchRecord[] records, AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class Touch : Runner + { + private readonly Txn txn; + private readonly Key key; + + public Touch(Txn txn, Key key) + { + this.txn = txn; + this.key = key; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + + if (txn != null) { + wp = client.WritePolicyDefault; + wp.Txn = txn; + } + client.Touch(wp, new TouchHandler(listener), key); + } + + private class TouchHandler : WriteListener + { + private Listener listener; + + public TouchHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(Key key) + { + listener.OnSuccess(); + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public class Delete : Runner + { + private readonly Txn txn; + private readonly Key key; + + public Delete(Txn txn, Key key) + { + this.txn = txn; + this.key = key; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + WritePolicy wp = null; + + if (txn != null) + { + wp = client.WritePolicyDefault; + wp.Txn = txn; + wp.durableDelete = true; + } + client.Delete(wp, new DeleteHandler(listener), key); + } + + private class DeleteHandler : DeleteListener + { + private Listener listener; + + public 
DeleteHandler(Listener listener) + { + this.listener = listener; + } + + public void OnSuccess(Key key, bool existed) + { + listener.OnSuccess(); + } + + public void OnFailure(AerospikeException e) + { + listener.OnFailure(e); + } + } + } + + public interface Runner + { + void Run(TestAsyncTxn parent, Listener listener); + } + + public interface Listener + { + void OnSuccess(); + void OnFailure(); + void OnFailure(Exception e); + + void OnFailure(Exception e, int expectedResult); + } + } +} diff --git a/AerospikeTest/Sync/Basic/TestBatch.cs b/AerospikeTest/Sync/Basic/TestBatch.cs index cf318908..847a13ff 100644 --- a/AerospikeTest/Sync/Basic/TestBatch.cs +++ b/AerospikeTest/Sync/Basic/TestBatch.cs @@ -1,498 +1,498 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -using Aerospike.Client; -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Neo.IronLua; -using System; -using System.Collections; -using System.Collections.Generic; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Runtime.Intrinsics.X86; -using System.Security.Policy; -using System.Text; - -namespace Aerospike.Test -{ - [TestClass] - public class TestBatch : TestSync - { - private const string BinName = "bbin"; - private const string BinName2 = "bbin2"; - private const string BinName3 = "bbin3"; - private const string ListBin = "lbin"; - private const string ListBin2 = "lbin2"; - private const string KeyPrefix = "tbatkey"; - private const string ValuePrefix = "batchvalue"; - private const int Size = 8; - - [ClassInitialize()] - public static void WriteRecords(TestContext testContext) - { - WritePolicy policy = new WritePolicy(); - if (!args.testProxy || (args.testProxy && nativeClient != null)) - { - policy.expiration = 2592000; - } - if (args.testProxy) - { - policy.totalTimeout = args.proxyTotalTimeout; - } - - for (int i = 1; i <= Size; i++) - { - Key key = new Key(args.ns, args.set, KeyPrefix + i); - Bin bin = new Bin(BinName, ValuePrefix + i); - - List list = new List(); - - for (int j = 0; j < i; j++) - { - list.Add(j * i); - } - - List list2 = new List(); - - for (int j = 0; j < 2; j++) - { - list2.Add(j); - } - - Bin listBin = new Bin(ListBin, list); - Bin listBin2 = new Bin(ListBin2, list2); - - if (i != 6) - { - client.Put(policy, key, bin, listBin, listBin2); - } - else - { - client.Put(policy, key, new Bin(BinName, i), listBin, listBin2); - } - } - - // Add records that will eventually be deleted. 
- client.Put(policy, new Key(args.ns, args.set, 10000), new Bin(BinName, 10000)); - client.Put(policy, new Key(args.ns, args.set, 10001), new Bin(BinName, 10001)); - client.Put(policy, new Key(args.ns, args.set, 10002), new Bin(BinName, 10002)); - } - - [TestMethod] - public void BatchExists() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - bool[] existsArray = client.Exists(null, keys); - Assert.AreEqual(Size, existsArray.Length); - - for (int i = 0; i < existsArray.Length; i++) - { - if (!existsArray[i]) - { - Assert.Fail("Some batch records not found."); - } - } - } - - [TestMethod] - public void BatchReads() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - Record[] records = client.Get(null, keys, BinName); - Assert.AreEqual(Size, records.Length); - - for (int i = 0; i < records.Length; i++) - { - Key key = keys[i]; - Record record = records[i]; - - if (i != 5) - { - AssertBinEqual(key, record, BinName, ValuePrefix + (i + 1)); - } - else - { - AssertBinEqual(key, record, BinName, i + 1); - } - } - } - - [TestMethod] - public void BatchReadHeaders() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - Record[] records = client.GetHeader(null, keys); - Assert.AreEqual(Size, records.Length); - - for (int i = 0; i < records.Length; i++) - { - Key key = keys[i]; - Record record = records[i]; - - AssertRecordFound(key, record); - Assert.AreNotEqual(0, record.generation); - // ttl can be zero if server default-ttl = 0. - //Assert.AreNotEqual(0, record.expiration); - } - } - - [TestMethod] - public void BatchReadComplex() - { - // Batch allows multiple namespaces in one call, but example test environment may only have one namespace. 
- - // bin * 8 - Expression exp = Exp.Build(Exp.Mul(Exp.IntBin(BinName), Exp.Val(8))); - Operation[] ops = Operation.Array(ExpOperation.Read(BinName, exp, ExpReadFlags.DEFAULT)); - - string[] bins = new string[] { BinName }; - List records = new List(); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 1), bins)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 2), true)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 3), true)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 4), false)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 5), true)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 6), ops)); - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 7), bins)); - - // This record should be found, but the requested bin will not be found. - records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 8), new string[] { "binnotfound" })); - - // This record should not be found. - records.Add(new BatchRead(new Key(args.ns, args.set, "keynotfound"), bins)); - - // Execute batch. 
- client.Get(null, records); - - AssertBatchBinEqual(records, BinName, 0); - AssertBatchBinEqual(records, BinName, 1); - AssertBatchBinEqual(records, BinName, 2); - AssertBatchRecordExists(records, BinName, 3); - AssertBatchBinEqual(records, BinName, 4); - - BatchRead batch = records[5]; - AssertRecordFound(batch.key, batch.record); - int v = batch.record.GetInt(BinName); - Assert.AreEqual(48, v); - - AssertBatchBinEqual(records, BinName, 6); - - batch = records[7]; - AssertRecordFound(batch.key, batch.record); - object val = batch.record.GetValue("binnotfound"); - if (val != null) - { - Assert.Fail("Unexpected batch bin value received"); - } - - batch = records[8]; - if (batch.record != null) - { - Assert.Fail("Unexpected batch record received"); - } - } - - [TestMethod] - public void BatchListReadOperate() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - Record[] records = client.Get(null, keys, - ListOperation.Size(ListBin), - ListOperation.GetByIndex(ListBin, -1, ListReturnType.VALUE)); - - Assert.AreEqual(Size, records.Length); - - for (int i = 0; i < records.Length; i++) - { - Record record = records[i]; - IList results = record.GetList(ListBin); - long size = (long)results[0]; - long val = (long)results[1]; - - Assert.AreEqual(i + 1, size); - Assert.AreEqual(i * (i + 1), val); - } - } - - [TestMethod] - public void BatchListWriteOperate() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - // Add integer to list and get size and last element of list bin for all records. 
- BatchResults bresults = client.Operate(null, null, keys, - ListOperation.Insert(ListBin2, 0, Value.Get(1000)), - ListOperation.Size(ListBin2), - ListOperation.GetByIndex(ListBin2, -1, ListReturnType.VALUE) - ); - - for (int i = 0; i < bresults.records.Length; i++) - { - BatchRecord br = bresults.records[i]; - Assert.AreEqual(0, br.resultCode); - - IList results = br.record.GetList(ListBin2); - long size = (long)results[1]; - long val = (long)results[2]; - - Assert.AreEqual(3, size); - Assert.AreEqual(1, val); - } - } - - [TestMethod] - public void BatchOperateSendKey() - { - Key[] keys = new Key[3]; - for (int i = 0; i < 3; i++) - { - keys[i] = new Key(args.ns, args.set, "sendkey" + i); - } - - BatchWritePolicy batchWritePolicy = new() - { - sendKey = true - }; - - Operation[] ops = { - Operation.Put(new Bin("now", DateTime.Now.ToFileTime())) - }; - - client.Operate(null, batchWritePolicy, keys, ops); - - Key myKey = new(args.ns, args.set, "sendkey2"); - WritePolicy wp = new() - { - sendKey = true - }; - - client.Put(wp, myKey, new Bin("name", "Andrew")); - } - - [TestMethod] - public void BatchReadAllBins() - { - Key[] keys = new Key[Size]; - for (int i = 0; i < Size; i++) - { - keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); - } - - Bin bin = new Bin("bin5", "NewValue"); - - BatchResults bresults = client.Operate(null, null, keys, - Operation.Put(bin), - Operation.Get() - ); - - for (int i = 0; i < bresults.records.Length; i++) - { - BatchRecord br = bresults.records[i]; - Assert.AreEqual(0, br.resultCode); - - Record r = br.record; - - string s = r.GetString(bin.name); - Assert.AreEqual("NewValue", s); - - object obj = r.GetValue(BinName); - Assert.IsNotNull(obj); - } - } - - [TestMethod] - public void BatchWriteComplex() - { - Expression wexp1 = Exp.Build(Exp.Add(Exp.IntBin(BinName), Exp.Val(1000))); - - Operation[] wops1 = Operation.Array(Operation.Put(new Bin(BinName2, 100))); - Operation[] wops2 = Operation.Array(ExpOperation.Write(BinName3, 
wexp1, ExpWriteFlags.DEFAULT)); - Operation[] rops1 = Operation.Array(Operation.Get(BinName2)); - Operation[] rops2 = Operation.Array(Operation.Get(BinName3)); - - BatchWritePolicy wp = new BatchWritePolicy(); - wp.sendKey = true; - - BatchWrite bw1 = new BatchWrite(new Key(args.ns, args.set, KeyPrefix + 1), wops1); - BatchWrite bw2 = new BatchWrite(new Key("invalid", args.set, KeyPrefix + 1), wops1); - BatchWrite bw3 = new BatchWrite(wp, new Key(args.ns, args.set, KeyPrefix + 6), wops2); - BatchDelete bd1 = new BatchDelete(new Key(args.ns, args.set, 10002)); - - List records = new List(); - records.Add(bw1); - records.Add(bw2); - records.Add(bw3); - records.Add(bd1); - - bool status = client.Operate(null, records); - - Assert.IsFalse(status); // "invalid" namespace triggers the false status. - Assert.AreEqual(0, bw1.resultCode); - AssertBinEqual(bw1.key, bw1.record, BinName2, 0); - Assert.AreEqual(ResultCode.INVALID_NAMESPACE, bw2.resultCode); - Assert.AreEqual(0, bw3.resultCode); - AssertBinEqual(bw3.key, bw3.record, BinName3, 0); - Assert.AreEqual(ResultCode.OK, bd1.resultCode); - - BatchRead br1 = new BatchRead(new Key(args.ns, args.set, KeyPrefix + 1), rops1); - BatchRead br2 = new BatchRead(new Key(args.ns, args.set, KeyPrefix + 6), rops2); - BatchRead br3 = new BatchRead(new Key(args.ns, args.set, 10002), true); - - records.Clear(); - records.Add(br1); - records.Add(br2); - records.Add(br3); - - status = client.Operate(null, records); - - Assert.IsFalse(status); // Read of deleted record causes status to be false. 
- AssertBinEqual(br1.key, br1.record, BinName2, 100); - AssertBinEqual(br2.key, br2.record, BinName3, 1006); - Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br3.resultCode); - } - - [TestMethod] - public void BatchDelete() - { - // Define keys - Key[] keys = new Key[] { new Key(args.ns, args.set, 10000), new Key(args.ns, args.set, 10001) }; - - // Ensure keys exists - bool[] exists = client.Exists(null, keys); - Assert.IsTrue(exists[0]); - Assert.IsTrue(exists[1]); - - // Delete keys - BatchResults br = client.Delete(null, null, keys); - Assert.IsTrue(br.status); - - // Ensure keys do not exist - exists = client.Exists(null, keys); - Assert.IsFalse(exists[0]); - Assert.IsFalse(exists[1]); - } - - [TestMethod] - public void BatchReadTTL() - { - // WARNING: This test takes a long time to run due to sleeps. - // Define keys - Key key1 = new(args.ns, args.set, 88888); - Key key2 = new(args.ns, args.set, 88889); - - // Write keys with ttl. - BatchWritePolicy bwp = new() - { - expiration = 10 - }; - Key[] keys = new Key[] { key1, key2 }; - client.Operate(null, bwp, keys, Operation.Put(new Bin("a", 1))); - - // Read records before they expire and reset read ttl on one record. - Util.Sleep(8000); - BatchReadPolicy brp1 = new() - { - readTouchTtlPercent = 80 - }; - - BatchReadPolicy brp2 = new() - { - readTouchTtlPercent = -1 - }; - - BatchRead br1 = new(brp1, key1, new String[] { "a" }); - BatchRead br2 = new(brp2, key2, new String[] { "a" }); - - List list = new() - { - br1, - br2 - }; - - bool rv = client.Operate(null, list); - - Assert.IsTrue(rv); - Assert.AreEqual(ResultCode.OK, br1.resultCode); - Assert.AreEqual(ResultCode.OK, br2.resultCode); - - // Read records again, but don't reset read ttl. 
- Util.Sleep(3000); - brp1.readTouchTtlPercent = -1; - brp2.readTouchTtlPercent = -1; - - br1 = new BatchRead(brp1, key1, new String[] { "a" }); - br2 = new BatchRead(brp2, key2, new String[] { "a" }); - - list.Clear(); - list.Add(br1); - list.Add(br2); - - rv = client.Operate(null, list); - - // Key 2 should have expired. - Assert.AreEqual(ResultCode.OK, br1.resultCode); - Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br2.resultCode); - Assert.IsFalse(rv); - - // Read record after it expires, showing it's gone. - Util.Sleep(8000); - rv = client.Operate(null, list); - Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br1.resultCode); - Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br2.resultCode); - Assert.IsFalse(rv); - } - - - private void AssertBatchBinEqual(List list, string binName, int i) - { - BatchRead batch = list[i]; - AssertBinEqual(batch.key, batch.record, binName, ValuePrefix + (i + 1)); - } - - private void AssertBatchRecordExists(List list, string binName, int i) - { - BatchRead batch = list[i]; - AssertRecordFound(batch.key, batch.record); - Assert.AreNotEqual(0, batch.record.generation); - // ttl can be zero if server default-ttl = 0. - // Assert.AreNotEqual(0, batch.record.expiration); - } - } -} +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using Aerospike.Client; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Neo.IronLua; +using System; +using System.Collections; +using System.Collections.Generic; +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics.X86; +using System.Security.Policy; +using System.Text; + +namespace Aerospike.Test +{ + [TestClass] + public class TestBatch : TestSync + { + private const string BinName = "bbin"; + private const string BinName2 = "bbin2"; + private const string BinName3 = "bbin3"; + private const string ListBin = "lbin"; + private const string ListBin2 = "lbin2"; + private const string KeyPrefix = "tbatkey"; + private const string ValuePrefix = "batchvalue"; + private const int Size = 8; + + [ClassInitialize()] + public static void WriteRecords(TestContext testContext) + { + WritePolicy policy = new WritePolicy(); + if (!args.testProxy || (args.testProxy && nativeClient != null)) + { + policy.expiration = 2592000; + } + if (args.testProxy) + { + policy.totalTimeout = args.proxyTotalTimeout; + } + + for (int i = 1; i <= Size; i++) + { + Key key = new Key(args.ns, args.set, KeyPrefix + i); + Bin bin = new Bin(BinName, ValuePrefix + i); + + List list = new List(); + + for (int j = 0; j < i; j++) + { + list.Add(j * i); + } + + List list2 = new List(); + + for (int j = 0; j < 2; j++) + { + list2.Add(j); + } + + Bin listBin = new Bin(ListBin, list); + Bin listBin2 = new Bin(ListBin2, list2); + + if (i != 6) + { + client.Put(policy, key, bin, listBin, listBin2); + } + else + { + client.Put(policy, key, new Bin(BinName, i), listBin, listBin2); + } + } + + // Add records that will eventually be deleted. 
+ client.Put(policy, new Key(args.ns, args.set, 10000), new Bin(BinName, 10000)); + client.Put(policy, new Key(args.ns, args.set, 10001), new Bin(BinName, 10001)); + client.Put(policy, new Key(args.ns, args.set, 10002), new Bin(BinName, 10002)); + } + + [TestMethod] + public void BatchExists() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + bool[] existsArray = client.Exists(null, keys); + Assert.AreEqual(Size, existsArray.Length); + + for (int i = 0; i < existsArray.Length; i++) + { + if (!existsArray[i]) + { + Assert.Fail("Some batch records not found."); + } + } + } + + [TestMethod] + public void BatchReads() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + Record[] records = client.Get(null, keys, BinName); + Assert.AreEqual(Size, records.Length); + + for (int i = 0; i < records.Length; i++) + { + Key key = keys[i]; + Record record = records[i]; + + if (i != 5) + { + AssertBinEqual(key, record, BinName, ValuePrefix + (i + 1)); + } + else + { + AssertBinEqual(key, record, BinName, i + 1); + } + } + } + + [TestMethod] + public void BatchReadHeaders() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + Record[] records = client.GetHeader(null, keys); + Assert.AreEqual(Size, records.Length); + + for (int i = 0; i < records.Length; i++) + { + Key key = keys[i]; + Record record = records[i]; + + AssertRecordFound(key, record); + Assert.AreNotEqual(0, record.generation); + // ttl can be zero if server default-ttl = 0. + //Assert.AreNotEqual(0, record.expiration); + } + } + + [TestMethod] + public void BatchReadComplex() + { + // Batch allows multiple namespaces in one call, but example test environment may only have one namespace. 
+ + // bin * 8 + Expression exp = Exp.Build(Exp.Mul(Exp.IntBin(BinName), Exp.Val(8))); + Operation[] ops = Operation.Array(ExpOperation.Read(BinName, exp, ExpReadFlags.DEFAULT)); + + string[] bins = new string[] { BinName }; + List records = new List(); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 1), bins)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 2), true)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 3), true)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 4), false)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 5), true)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 6), ops)); + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 7), bins)); + + // This record should be found, but the requested bin will not be found. + records.Add(new BatchRead(new Key(args.ns, args.set, KeyPrefix + 8), new string[] { "binnotfound" })); + + // This record should not be found. + records.Add(new BatchRead(new Key(args.ns, args.set, "keynotfound"), bins)); + + // Execute batch. 
+ client.Get(null, records); + + AssertBatchBinEqual(records, BinName, 0); + AssertBatchBinEqual(records, BinName, 1); + AssertBatchBinEqual(records, BinName, 2); + AssertBatchRecordExists(records, BinName, 3); + AssertBatchBinEqual(records, BinName, 4); + + BatchRead batch = records[5]; + AssertRecordFound(batch.key, batch.record); + int v = batch.record.GetInt(BinName); + Assert.AreEqual(48, v); + + AssertBatchBinEqual(records, BinName, 6); + + batch = records[7]; + AssertRecordFound(batch.key, batch.record); + object val = batch.record.GetValue("binnotfound"); + if (val != null) + { + Assert.Fail("Unexpected batch bin value received"); + } + + batch = records[8]; + if (batch.record != null) + { + Assert.Fail("Unexpected batch record received"); + } + } + + [TestMethod] + public void BatchListReadOperate() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + Record[] records = client.Get(null, keys, + ListOperation.Size(ListBin), + ListOperation.GetByIndex(ListBin, -1, ListReturnType.VALUE)); + + Assert.AreEqual(Size, records.Length); + + for (int i = 0; i < records.Length; i++) + { + Record record = records[i]; + IList results = record.GetList(ListBin); + long size = (long)results[0]; + long val = (long)results[1]; + + Assert.AreEqual(i + 1, size); + Assert.AreEqual(i * (i + 1), val); + } + } + + [TestMethod] + public void BatchListWriteOperate() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + // Add integer to list and get size and last element of list bin for all records. 
+ BatchResults bresults = client.Operate(null, null, keys, + ListOperation.Insert(ListBin2, 0, Value.Get(1000)), + ListOperation.Size(ListBin2), + ListOperation.GetByIndex(ListBin2, -1, ListReturnType.VALUE) + ); + + for (int i = 0; i < bresults.records.Length; i++) + { + BatchRecord br = bresults.records[i]; + Assert.AreEqual(0, br.resultCode); + + IList results = br.record.GetList(ListBin2); + long size = (long)results[1]; + long val = (long)results[2]; + + Assert.AreEqual(3, size); + Assert.AreEqual(1, val); + } + } + + [TestMethod] + public void BatchOperateSendKey() + { + Key[] keys = new Key[3]; + for (int i = 0; i < 3; i++) + { + keys[i] = new Key(args.ns, args.set, "sendkey" + i); + } + + BatchWritePolicy batchWritePolicy = new() + { + sendKey = true + }; + + Operation[] ops = { + Operation.Put(new Bin("now", DateTime.Now.ToFileTime())) + }; + + client.Operate(null, batchWritePolicy, keys, ops); + + Key myKey = new(args.ns, args.set, "sendkey2"); + WritePolicy wp = new() + { + sendKey = true + }; + + client.Put(wp, myKey, new Bin("name", "Andrew")); + } + + [TestMethod] + public void BatchReadAllBins() + { + Key[] keys = new Key[Size]; + for (int i = 0; i < Size; i++) + { + keys[i] = new Key(args.ns, args.set, KeyPrefix + (i + 1)); + } + + Bin bin = new Bin("bin5", "NewValue"); + + BatchResults bresults = client.Operate(null, null, keys, + Operation.Put(bin), + Operation.Get() + ); + + for (int i = 0; i < bresults.records.Length; i++) + { + BatchRecord br = bresults.records[i]; + Assert.AreEqual(0, br.resultCode); + + Record r = br.record; + + string s = r.GetString(bin.name); + Assert.AreEqual("NewValue", s); + + object obj = r.GetValue(BinName); + Assert.IsNotNull(obj); + } + } + + [TestMethod] + public void BatchWriteComplex() + { + Expression wexp1 = Exp.Build(Exp.Add(Exp.IntBin(BinName), Exp.Val(1000))); + + Operation[] wops1 = Operation.Array(Operation.Put(new Bin(BinName2, 100))); + Operation[] wops2 = Operation.Array(ExpOperation.Write(BinName3, 
wexp1, ExpWriteFlags.DEFAULT)); + Operation[] rops1 = Operation.Array(Operation.Get(BinName2)); + Operation[] rops2 = Operation.Array(Operation.Get(BinName3)); + + BatchWritePolicy wp = new BatchWritePolicy(); + wp.sendKey = true; + + BatchWrite bw1 = new BatchWrite(new Key(args.ns, args.set, KeyPrefix + 1), wops1); + BatchWrite bw2 = new BatchWrite(new Key("invalid", args.set, KeyPrefix + 1), wops1); + BatchWrite bw3 = new BatchWrite(wp, new Key(args.ns, args.set, KeyPrefix + 6), wops2); + BatchDelete bd1 = new BatchDelete(new Key(args.ns, args.set, 10002)); + + List records = new List(); + records.Add(bw1); + records.Add(bw2); + records.Add(bw3); + records.Add(bd1); + + bool status = client.Operate(null, records); + + Assert.IsFalse(status); // "invalid" namespace triggers the false status. + Assert.AreEqual(0, bw1.resultCode); + AssertBinEqual(bw1.key, bw1.record, BinName2, 0); + Assert.AreEqual(ResultCode.INVALID_NAMESPACE, bw2.resultCode); + Assert.AreEqual(0, bw3.resultCode); + AssertBinEqual(bw3.key, bw3.record, BinName3, 0); + Assert.AreEqual(ResultCode.OK, bd1.resultCode); + + BatchRead br1 = new BatchRead(new Key(args.ns, args.set, KeyPrefix + 1), rops1); + BatchRead br2 = new BatchRead(new Key(args.ns, args.set, KeyPrefix + 6), rops2); + BatchRead br3 = new BatchRead(new Key(args.ns, args.set, 10002), true); + + records.Clear(); + records.Add(br1); + records.Add(br2); + records.Add(br3); + + status = client.Operate(null, records); + + Assert.IsFalse(status); // Read of deleted record causes status to be false. 
+ AssertBinEqual(br1.key, br1.record, BinName2, 100); + AssertBinEqual(br2.key, br2.record, BinName3, 1006); + Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br3.resultCode); + } + + [TestMethod] + public void BatchDelete() + { + // Define keys + Key[] keys = new Key[] { new Key(args.ns, args.set, 10000), new Key(args.ns, args.set, 10001) }; + + // Ensure keys exists + bool[] exists = client.Exists(null, keys); + Assert.IsTrue(exists[0]); + Assert.IsTrue(exists[1]); + + // Delete keys + BatchResults br = client.Delete(null, null, keys); + Assert.IsTrue(br.status); + + // Ensure keys do not exist + exists = client.Exists(null, keys); + Assert.IsFalse(exists[0]); + Assert.IsFalse(exists[1]); + } + + [TestMethod] + public void BatchReadTTL() + { + // WARNING: This test takes a long time to run due to sleeps. + // Define keys + Key key1 = new(args.ns, args.set, 88888); + Key key2 = new(args.ns, args.set, 88889); + + // Write keys with ttl. + BatchWritePolicy bwp = new() + { + expiration = 10 + }; + Key[] keys = new Key[] { key1, key2 }; + client.Operate(null, bwp, keys, Operation.Put(new Bin("a", 1))); + + // Read records before they expire and reset read ttl on one record. + Util.Sleep(8000); + BatchReadPolicy brp1 = new() + { + readTouchTtlPercent = 80 + }; + + BatchReadPolicy brp2 = new() + { + readTouchTtlPercent = -1 + }; + + BatchRead br1 = new(brp1, key1, new String[] { "a" }); + BatchRead br2 = new(brp2, key2, new String[] { "a" }); + + List list = new() + { + br1, + br2 + }; + + bool rv = client.Operate(null, list); + + Assert.IsTrue(rv); + Assert.AreEqual(ResultCode.OK, br1.resultCode); + Assert.AreEqual(ResultCode.OK, br2.resultCode); + + // Read records again, but don't reset read ttl. 
+ Util.Sleep(3000); + brp1.readTouchTtlPercent = -1; + brp2.readTouchTtlPercent = -1; + + br1 = new BatchRead(brp1, key1, new String[] { "a" }); + br2 = new BatchRead(brp2, key2, new String[] { "a" }); + + list.Clear(); + list.Add(br1); + list.Add(br2); + + rv = client.Operate(null, list); + + // Key 2 should have expired. + Assert.AreEqual(ResultCode.OK, br1.resultCode); + Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br2.resultCode); + Assert.IsFalse(rv); + + // Read record after it expires, showing it's gone. + Util.Sleep(8000); + rv = client.Operate(null, list); + Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br1.resultCode); + Assert.AreEqual(ResultCode.KEY_NOT_FOUND_ERROR, br2.resultCode); + Assert.IsFalse(rv); + } + + + private void AssertBatchBinEqual(List list, string binName, int i) + { + BatchRead batch = list[i]; + AssertBinEqual(batch.key, batch.record, binName, ValuePrefix + (i + 1)); + } + + private void AssertBatchRecordExists(List list, string binName, int i) + { + BatchRead batch = list[i]; + AssertRecordFound(batch.key, batch.record); + Assert.AreNotEqual(0, batch.record.generation); + // ttl can be zero if server default-ttl = 0. + // Assert.AreNotEqual(0, batch.record.expiration); + } + } +} diff --git a/AerospikeTest/Sync/Basic/TestTxn.cs b/AerospikeTest/Sync/Basic/TestTxn.cs index 3a3784fa..bb5c6930 100644 --- a/AerospikeTest/Sync/Basic/TestTxn.cs +++ b/AerospikeTest/Sync/Basic/TestTxn.cs @@ -1,492 +1,492 @@ -/* - * Copyright 2012-2018 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -using Microsoft.VisualStudio.TestTools.UnitTesting; -using Aerospike.Client; -using System.Reflection; -using System.Text; - -namespace Aerospike.Test -{ - [TestClass] - public class TestTxn : TestSync - { - private static readonly string binName = "bin"; - - [ClassInitialize()] - public static void Prepare(TestContext testContext) - { - if (!args.testProxy || (args.testProxy && nativeClient != null)) - { - Assembly assembly = Assembly.GetExecutingAssembly(); - RegisterTask task = nativeClient.Register(null, assembly, "Aerospike.Test.LuaResources.record_example.lua", "record_example.lua", Language.LUA); - task.Wait(); - } - } - - [TestMethod] - public void TxnWrite() - { - Key key = new(args.ns, args.set, "mrtkey1"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val2")); - - client.Commit(txn); - - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); - } - - [TestMethod] - public void TxnWriteTwice() - { - Key key = new(args.ns, args.set, "mrtkey2"); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val1")); - client.Put(wp, key, new Bin(binName, "val2")); - - client.Commit(txn); - - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); - } - - [TestMethod] - public void TxnWriteConflict() - { - Key key = new(args.ns, args.set, "mrtkey21"); - - Txn txn1 = new(); - Txn txn2 = new(); 
- - WritePolicy wp1 = client.WritePolicyDefault; - WritePolicy wp2 = client.WritePolicyDefault; - wp1.Txn = txn1; - wp2.Txn = txn2; - - client.Put(wp1, key, new Bin(binName, "val1")); - - try - { - client.Put(wp2, key, new Bin(binName, "val2")); - } - catch (AerospikeException ae) - { - if (ae.Result != ResultCode.MRT_BLOCKED) - { - throw ae; - } - } - - client.Commit(txn1); - client.Commit(txn2); - - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); - } - - [TestMethod] - public void TxnWriteBlock() - { - Key key = new(args.ns, args.set, "mrtkey3"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val2")); - - try - { - // This write should be blocked. - client.Put(null, key, new Bin(binName, "val3")); - throw new AerospikeException("Unexpected success"); - } - catch (AerospikeException e) - { - if (e.Result != ResultCode.MRT_BLOCKED) - { - throw e; - } - } - - client.Commit(txn); - } - - [TestMethod] - public void TxnWriteRead() - { - Key key = new(args.ns, args.set, "mrtkey4"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val2")); - - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); - - client.Commit(txn); - - record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); - } - - [TestMethod] - public void TxnWriteAbort() - { - Key key = new(args.ns, args.set, "mrtkey5"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Put(wp, key, new Bin(binName, "val2")); - - Policy p = client.ReadPolicyDefault; - p.Txn = txn; - Record record = client.Get(p, key); - AssertBinEqual(key, record, binName, "val2"); - - 
client.Abort(txn); - - record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); - } - - [TestMethod] - public void TxnDelete() - { - Key key = new(args.ns, args.set, "mrtkey6"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - wp.durableDelete = true; - client.Delete(wp, key); - - client.Commit(txn); - - Record record = client.Get(null, key); - Assert.IsNull(record); - } - - [TestMethod] - public void TxnDeleteAbort() - { - Key key = new(args.ns, args.set, "mrtkey7"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - wp.durableDelete = true; - client.Delete(wp, key); - - client.Abort(txn); - - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); - } - - [TestMethod] - public void TxnDeleteTwice() - { - Key key = new(args.ns, args.set, "mrtkey8"); - - Txn txn = new(); - - client.Put(null, key, new Bin(binName, "val1")); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - wp.durableDelete = true; - client.Delete(wp, key); - client.Delete(wp, key); - - client.Commit(txn); - - Record record = client.Get(null, key); - Assert.IsNull(record); - } - - [TestMethod] - public void TxnTouch() - { - Key key = new(args.ns, args.set, "mrtkey9"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Touch(wp, key); - - client.Commit(txn); - - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); - } - - [TestMethod] - public void TxnTouchAbort() - { - Key key = new(args.ns, args.set, "mrtkey10"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Touch(wp, key); - - client.Abort(txn); - - Record record = 
client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); - } - - [TestMethod] - public void TxnOperateWrite() - { - Key key = new(args.ns, args.set, "mrtkey11"); - - client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - Record record = client.Operate(wp, key, - Operation.Put(new Bin(binName, "val2")), - Operation.Get("bin2") - ); - AssertBinEqual(key, record, "bin2", "bal1"); - - client.Commit(txn); - - record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); - } - - [TestMethod] - public void TxnOperateWriteAbort() - { - Key key = new(args.ns, args.set, "mrtkey12"); - - client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - Record record = client.Operate(wp, key, - Operation.Put(new Bin(binName, "val2")), - Operation.Get("bin2") - ); - AssertBinEqual(key, record, "bin2", "bal1"); - - client.Abort(txn); - - record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val1"); - } - - [TestMethod] - public void TxnUDF() - { - Key key = new(args.ns, args.set, "mrtkey13"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); - - client.Commit(txn); - - Record record = client.Get(null, key); - AssertBinEqual(key, record, binName, "val2"); - } - - [TestMethod] - public void TxnUDFAbort() - { - Key key = new(args.ns, args.set, "mrtkey14"); - - client.Put(null, key, new Bin(binName, "val1")); - - Txn txn = new(); - - WritePolicy wp = client.WritePolicyDefault; - wp.Txn = txn; - client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); - - client.Abort(txn); - - Record record = client.Get(null, key); - 
AssertBinEqual(key, record, binName, "val1"); - } - - [TestMethod] - public void TxnBatch() - { - Key[] keys = new Key[10]; - Bin bin = new(binName, 1); - - for (int i = 0; i < keys.Length; i++) - { - Key key = new(args.ns, args.set, i); - keys[i] = key; - - client.Put(null, key, bin); - } - - Record[] recs = client.Get(null, keys); - AssertBatchEqual(keys, recs, 1); - - Txn txn = new(); - - bin = new(binName, 2); - - BatchPolicy bp = BatchPolicy.WriteDefault(); - bp.Txn = txn; - - BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); - - if (!bresults.status) - { - StringBuilder sb = new StringBuilder(); - sb.Append("Batch failed:"); - sb.Append(System.Environment.NewLine); - - foreach (BatchRecord br in bresults.records) - { - if (br.resultCode == 0) - { - sb.Append("Record: " + br.record); - } - else - { - sb.Append("ResultCode: " + br.resultCode); - } - sb.Append(System.Environment.NewLine); - } - - throw new AerospikeException(sb.ToString()); - } - - client.Commit(txn); - - recs = client.Get(null, keys); - AssertBatchEqual(keys, recs, 2); - } - - [TestMethod] - public void TxnBatchAbort() - { - var keys = new Key[10]; - Bin bin = new(binName, 1); - - for (int i = 0; i < keys.Length; i++) - { - Key key = new(args.ns, args.set, i); - keys[i] = key; - - client.Put(null, key, bin); - } - - Record[] recs = client.Get(null, keys); - AssertBatchEqual(keys, recs, 1); - - Txn txn = new(); - - bin = new Bin(binName, 2); - - BatchPolicy bp = BatchPolicy.WriteDefault(); - bp.Txn = txn; - - BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); - - if (!bresults.status) - { - StringBuilder sb = new StringBuilder(); - sb.Append("Batch failed:"); - sb.Append(System.Environment.NewLine); - - foreach (BatchRecord br in bresults.records) - { - if (br.resultCode == 0) - { - sb.Append("Record: " + br.record); - } - else - { - sb.Append("ResultCode: " + br.resultCode); - } - sb.Append(System.Environment.NewLine); - } - - throw new 
AerospikeException(sb.ToString()); - } - - client.Abort(txn); - - recs = client.Get(null, keys); - AssertBatchEqual(keys, recs, 1); - } - - private void AssertBatchEqual(Key[] keys, Record[] recs, int expected) - { - for (int i = 0; i < keys.Length; i++) - { - Key key = keys[i]; - Record rec = recs[i]; - - Assert.IsNotNull(rec); - - int received = rec.GetInt(binName); - Assert.AreEqual(expected, received); - } - } - } -} +/* + * Copyright 2012-2018 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Aerospike.Client; +using System.Reflection; +using System.Text; + +namespace Aerospike.Test +{ + [TestClass] + public class TestTxn : TestSync + { + private static readonly string binName = "bin"; + + [ClassInitialize()] + public static void Prepare(TestContext testContext) + { + if (!args.testProxy || (args.testProxy && nativeClient != null)) + { + Assembly assembly = Assembly.GetExecutingAssembly(); + RegisterTask task = nativeClient.Register(null, assembly, "Aerospike.Test.LuaResources.record_example.lua", "record_example.lua", Language.LUA); + task.Wait(); + } + } + + [TestMethod] + public void TxnWrite() + { + Key key = new(args.ns, args.set, "mrtkey111"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnWriteTwice() + { + Key key = new(args.ns, args.set, "mrtkey2"); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val1")); + client.Put(wp, key, new Bin(binName, "val2")); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnWriteConflict() + { + Key key = new(args.ns, args.set, "mrtkey21"); + + Txn txn1 = new(); + Txn txn2 = new(); + + WritePolicy wp1 = client.WritePolicyDefault; + WritePolicy wp2 = client.WritePolicyDefault; + wp1.Txn = txn1; + wp2.Txn = txn2; + + client.Put(wp1, key, new Bin(binName, "val1")); + + try + { + client.Put(wp2, key, new Bin(binName, "val2")); + } + catch (AerospikeException ae) + { + if (ae.Result != ResultCode.MRT_BLOCKED) + { + throw; + } + } + + client.Commit(txn1); + client.Commit(txn2); + + Record 
record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnWriteBlock() + { + Key key = new(args.ns, args.set, "mrtkey3"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + try + { + // This write should be blocked. + client.Put(null, key, new Bin(binName, "val3")); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException e) + { + if (e.Result != ResultCode.MRT_BLOCKED) + { + throw; + } + } + + client.Commit(txn); + } + + [TestMethod] + public void TxnWriteRead() + { + Key key = new(args.ns, args.set, "mrtkey4"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + + client.Commit(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnWriteAbort() + { + Key key = new(args.ns, args.set, "mrtkey5"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + Policy p = client.ReadPolicyDefault; + p.Txn = txn; + Record record = client.Get(p, key); + AssertBinEqual(key, record, binName, "val2"); + + client.Abort(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnDelete() + { + Key key = new(args.ns, args.set, "mrtkey6"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + wp.durableDelete = true; + client.Delete(wp, key); + + client.Commit(txn); + + Record 
record = client.Get(null, key); + Assert.IsNull(record); + } + + [TestMethod] + public void TxnDeleteAbort() + { + Key key = new(args.ns, args.set, "mrtkey7"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + wp.durableDelete = true; + client.Delete(wp, key); + + client.Abort(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnDeleteTwice() + { + Key key = new(args.ns, args.set, "mrtkey8"); + + Txn txn = new(); + + client.Put(null, key, new Bin(binName, "val1")); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + wp.durableDelete = true; + client.Delete(wp, key); + client.Delete(wp, key); + + client.Commit(txn); + + Record record = client.Get(null, key); + Assert.IsNull(record); + } + + [TestMethod] + public void TxnTouch() + { + Key key = new(args.ns, args.set, "mrtkey91"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Touch(wp, key); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnTouchAbort() + { + Key key = new(args.ns, args.set, "mrtkey10"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Touch(wp, key); + + client.Abort(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnOperateWrite() + { + Key key = new(args.ns, args.set, "mrtkey11"); + + client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + Record record = client.Operate(wp, key, + Operation.Put(new Bin(binName, "val2")), + 
Operation.Get("bin2") + ); + AssertBinEqual(key, record, "bin2", "bal1"); + + client.Commit(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnOperateWriteAbort() + { + Key key = new(args.ns, args.set, "mrtkey12"); + + client.Put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + Record record = client.Operate(wp, key, + Operation.Put(new Bin(binName, "val2")), + Operation.Get("bin2") + ); + AssertBinEqual(key, record, "bin2", "bal1"); + + client.Abort(txn); + + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnUDF() + { + Key key = new(args.ns, args.set, "mrtkey13"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + + client.Commit(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + } + + [TestMethod] + public void TxnUDFAbort() + { + Key key = new(args.ns, args.set, "mrtkey14"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); + + client.Abort(txn); + + Record record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val1"); + } + + [TestMethod] + public void TxnBatch() + { + Key[] keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Record[] recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + + Txn txn = new(); + + bin = new(binName, 2); + + 
BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.Txn = txn; + + BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + + if (!bresults.status) + { + StringBuilder sb = new StringBuilder(); + sb.Append("Batch failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in bresults.records) + { + if (br.resultCode == 0) + { + sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + + throw new AerospikeException(sb.ToString()); + } + + client.Commit(txn); + + recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 2); + } + + [TestMethod] + public void TxnBatchAbort() + { + var keys = new Key[10]; + Bin bin = new(binName, 1); + + for (int i = 0; i < keys.Length; i++) + { + Key key = new(args.ns, args.set, i); + keys[i] = key; + + client.Put(null, key, bin); + } + + Record[] recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + + Txn txn = new(); + + bin = new Bin(binName, 2); + + BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.Txn = txn; + + BatchResults bresults = client.Operate(bp, null, keys, Operation.Put(bin)); + + if (!bresults.status) + { + StringBuilder sb = new StringBuilder(); + sb.Append("Batch failed:"); + sb.Append(System.Environment.NewLine); + + foreach (BatchRecord br in bresults.records) + { + if (br.resultCode == 0) + { + sb.Append("Record: " + br.record); + } + else + { + sb.Append("ResultCode: " + br.resultCode); + } + sb.Append(System.Environment.NewLine); + } + + throw new AerospikeException(sb.ToString()); + } + + client.Abort(txn); + + recs = client.Get(null, keys); + AssertBatchEqual(keys, recs, 1); + } + + private void AssertBatchEqual(Key[] keys, Record[] recs, int expected) + { + for (int i = 0; i < keys.Length; i++) + { + Key key = keys[i]; + Record rec = recs[i]; + + Assert.IsNotNull(rec); + + int received = rec.GetInt(binName); + Assert.AreEqual(expected, received); + } + } + } +} diff 
--git a/AerospikeTest/settings.json b/AerospikeTest/settings.json index 24dbc27b..6dcfac3f 100644 --- a/AerospikeTest/settings.json +++ b/AerospikeTest/settings.json @@ -1,27 +1,27 @@ -{ - "Host": "localhost", - "Port": 3100, - "ProxyHost": "localhost", - "ProxyPort": 4000, - "TestProxy": false, - "ClusterName": "", - "Namespace": "test", - "Set": "test", - "User": "charlie", - "Password": "123456", - "Timeout": 25000, - "UseServicesAlternate": true, - "TlsEnable": false, - "TlsName": "", - "TlsProtocols": "", - "TlsRevoke": "", - "TlsClientCertFile": "", - "TlsLoginOnly": false, - "ProxyTlsEnable": true, - "ProxyTlsName": "", - "ProxyTlsProtocols": "", - "ProxyTlsRevoke": "", - "ProxyTlsClientCertFile": "", - "ProxyTlsLoginOnly": true, - "AuthMode": "INTERNAL" -} +{ + "Host": "localhost", + "Port": 3000, + "ProxyHost": "localhost", + "ProxyPort": 4000, + "TestProxy": false, + "ClusterName": "", + "Namespace": "test", + "Set": "test", + "User": "", + "Password": "", + "Timeout": 25000, + "UseServicesAlternate": false, + "TlsEnable": false, + "TlsName": "", + "TlsProtocols": "", + "TlsRevoke": "", + "TlsClientCertFile": "", + "TlsLoginOnly": false, + "ProxyTlsEnable": true, + "ProxyTlsName": "", + "ProxyTlsProtocols": "", + "ProxyTlsRevoke": "", + "ProxyTlsClientCertFile": "", + "ProxyTlsLoginOnly": true, + "AuthMode": "INTERNAL" +} From b9ef3343dd2a89dcd0d14da94f4228ac945f8bc9 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Wed, 18 Sep 2024 13:42:31 -0600 Subject: [PATCH 06/41] Working on testing --- AerospikeClient/Main/Txn.cs | 2 +- AerospikeTest/Sync/Basic/TestTxn.cs | 130 ++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+), 1 deletion(-) diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index 945d8cdd..ff5c182e 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -64,7 +64,7 @@ public Txn(int readsCapacity, int writesCapacity) } Id = CreateId(); - Reads = new ConcurrentDictionary(100, readsCapacity); 
// TODO: concurrency level + Reads = new ConcurrentDictionary(100, readsCapacity); Writes = new HashSet(writesCapacity); } diff --git a/AerospikeTest/Sync/Basic/TestTxn.cs b/AerospikeTest/Sync/Basic/TestTxn.cs index bb5c6930..f49bb36e 100644 --- a/AerospikeTest/Sync/Basic/TestTxn.cs +++ b/AerospikeTest/Sync/Basic/TestTxn.cs @@ -18,6 +18,7 @@ using Aerospike.Client; using System.Reflection; using System.Text; +using Microsoft.AspNetCore.DataProtection.KeyManagement; namespace Aerospike.Test { @@ -165,6 +166,8 @@ public void TxnWriteAbort() { Key key = new(args.ns, args.set, "mrtkey5"); + client.Delete(null, key); + client.Put(null, key, new Bin(binName, "val1")); Txn txn = new(); @@ -182,6 +185,7 @@ public void TxnWriteAbort() record = client.Get(null, key); AssertBinEqual(key, record, binName, "val1"); + Assert.AreEqual(3, record.generation); } [TestMethod] @@ -475,6 +479,132 @@ public void TxnBatchAbort() AssertBatchEqual(keys, recs, 1); } + [TestMethod] + public void TxnWriteCommitAbort() + { + Key key = new(args.ns, args.set, "mrtkey15"); + + client.Put(null, key, new Bin(binName, "val1")); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val2")); + + Policy p = client.ReadPolicyDefault; + p.Txn = txn; + Record record = client.Get(p, key); + AssertBinEqual(key, record, binName, "val2"); + + client.Commit(txn); + record = client.Get(null, key); + AssertBinEqual(key, record, binName, "val2"); + + var abortStatus = client.Abort(txn); + Assert.AreEqual(AbortStatus.AbortStatusType.ALREADY_ATTEMPTED, abortStatus); + } + + [TestMethod] + public void TxnWriteReadTwoTxn() + { + Txn txn1 = new(); + Txn txn2 = new(); + + Key key = new(args.ns, args.set, "mrtkey16"); + + client.Put(null, key, new Bin(binName, "val1")); + + var rp1 = client.ReadPolicyDefault; + rp1.Txn = txn1; + var record = client.Get(rp1, key); + AssertBinEqual(key, record, binName, "val1"); + + var rp2 = client.ReadPolicyDefault; 
+ rp2.Txn = txn2; + record = client.Get(rp2, key); + AssertBinEqual(key, record, binName, "val1"); + + var status = client.Commit(txn1); + Assert.AreEqual(CommitStatus.CommitStatusType.OK, status); + + status = client.Commit(txn2); + Assert.AreEqual(CommitStatus.CommitStatusType.OK, status); + } + + [TestMethod] + public void TxnLUTCommit() + { + Txn txn = new(); + + Key key1 = new(args.ns, args.set, "mrtkey17"); + Key key2 = new(args.ns, args.set, "mrtkey18"); + Key key3 = new(args.ns, args.set, "mrtkey19"); + + var wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key1, new Bin(binName, "val1")); + + var p = client.ReadPolicyDefault; + p.Txn = txn; + var record = client.Get(p, key1); + Assert.AreEqual(1, record.generation); + + client.Put(wp, key1, new Bin(binName, "val11")); + + record = client.Get(p, key1); + Assert.AreEqual(2, record.generation); + + client.Put(null, key2, new Bin(binName, "val1")); + + record = client.Get(p, key2); + Assert.AreEqual(1, record.generation); + + client.Put(wp, key2, new Bin(binName, "val11")); + + record = client.Get(p, key2); + Assert.AreEqual(2, record.generation); + + client.Put(wp, key3, new Bin(binName, "val1")); + + record = client.Get(p, key3); + Assert.AreEqual(1, record.generation); + + client.Commit(txn); + + record = client.Get(null, key1); + Assert.AreEqual(3, record.generation); + record = client.Get(null, key2); + Assert.AreEqual(3, record.generation); + record = client.Get(null, key3); + Assert.AreEqual(2, record.generation); + } + + [TestMethod] + public void TxnInvalidNamespace() + { + Key key = new("invalid", args.set, "mrtkey"); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + + try + { + client.Put(wp, key, new Bin(binName, "val1")); + client.Commit(txn); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException e) + { + if (e.Result != ResultCode.INVALID_NAMESPACE) + { + throw; + } + } + } + private void AssertBatchEqual(Key[] 
keys, Record[] recs, int expected) { for (int i = 0; i < keys.Length; i++) From cdce79039982c4ffacb256c2f7c07775007d21e9 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Tue, 8 Oct 2024 14:43:24 -0600 Subject: [PATCH 07/41] Working on tests and examples in Demo project --- AerospikeClient/Async/AsyncTxnRoll.cs | 61 +++-- AerospikeClient/Command/Command.cs | 3 +- AerospikeClient/Command/TxnRoll.cs | 72 +++-- AerospikeClient/Main/AerospikeException.cs | 29 ++- AerospikeClient/Main/Txn.cs | 37 +-- AerospikeClient/Util/ConcurrentHashMap.cs | 166 ++++++++++++ AerospikeDemo/AsyncTransaction.cs | 289 +++++++++++++++++++++ AerospikeDemo/DemoForm.cs | 2 + AerospikeDemo/Transaction.cs | 79 ++++++ AerospikeTest/Async/TestAsyncTxn.cs | 178 ++++++++++++- AerospikeTest/Sync/Basic/TestTxn.cs | 155 +++++++++-- 11 files changed, 979 insertions(+), 92 deletions(-) create mode 100644 AerospikeClient/Util/ConcurrentHashMap.cs create mode 100644 AerospikeDemo/AsyncTransaction.cs create mode 100644 AerospikeDemo/Transaction.cs diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs index 276a8100..d88f629b 100644 --- a/AerospikeClient/Async/AsyncTxnRoll.cs +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -66,27 +66,33 @@ public void Abort(AbortListener listener) private void Verify(BatchRecordArrayListener verifyListener) { // Validate record versions in a batch. 
- HashSet> reads = txn.Reads.ToHashSet>(); - int max = reads.Count; - if (max == 0) - { - verifyListener.OnSuccess(new BatchRecord[0], true); - return; - } - - BatchRecord[] records = new BatchRecord[max]; - Key[] keys = new Key[max]; - long?[] versions = new long?[max]; int count = 0; + BatchRecord[] records = null; + Key[] keys = null; + long?[] versions = null; + + bool actionPerformed = txn.Reads.PerformActionOnEachElement(max => + { + if (max == 0) return false; - foreach (KeyValuePair entry in reads) + records = new BatchRecord[max]; + keys = new Key[max]; + versions = new long?[max]; + return true; + }, + (key, value, count) => { - Key key = entry.Key; keys[count] = key; records[count] = new BatchRecord(key, false); - versions[count] = entry.Value; - count++; + versions[count] = value; + }); + + if (!actionPerformed) // If no action was performed, there are no elements. Return. + { + verifyListener.OnSuccess(new BatchRecord[0], true); + return; } + this.verifyRecords = records; AsyncBatchTxnVerifyExecutor executor = new(cluster, verifyPolicy, verifyListener, keys, versions, records); @@ -115,7 +121,7 @@ private void RollForward() RollForwardListener rollListener = new(this); Roll(rollListener, Command.INFO4_MRT_ROLL_FORWARD); } - catch (Exception e) + catch (Exception) { NotifyCommitSuccess(CommitStatusType.ROLL_FORWARD_ABANDONED); } @@ -205,7 +211,7 @@ private void CloseOnAbort() AsyncTxnClose command = new(cluster, txn, deleteListener, writePolicy, txnKey); command.Execute(); } - catch (Exception e) + catch (Exception) { NotifyAbortSuccess(AbortStatusType.CLOSE_ABANDONED); } @@ -229,13 +235,26 @@ private void NotifyCommitFailure(CommitErrorType error, Exception cause, bool se { try { - AerospikeException.Commit aec = (cause == null) ? 
- new AerospikeException.Commit(error, verifyRecords, rollRecords) : - new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); + AerospikeException.Commit aec; if (verifyException != null) { - //aec.AddSuppressed(verifyException); TODO + if (cause == null) + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, verifyException); + } + else + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, new[] { cause, verifyException }); + } + } + else if (cause != null) + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); + } + else + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords); } if (cause is AerospikeException) { diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs index aa271065..d9705949 100644 --- a/AerospikeClient/Command/Command.cs +++ b/AerospikeClient/Command/Command.cs @@ -303,6 +303,7 @@ public void SetTxnMarkRollForward(Key key) Begin(); int fieldCount = EstimateKeySize(key); EstimateOperationSize(bin); + SizeBuffer(); WriteTxnMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1); WriteOperation(bin, Operation.Type.WRITE); End(); @@ -448,6 +449,7 @@ public void SetTxnClose(Txn txn, Key key) { Begin(); int fieldCount = EstimateKeySize(key); + SizeBuffer(); WriteTxnMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE, fieldCount, 0); End(); @@ -455,7 +457,6 @@ public void SetTxnClose(Txn txn, Key key) private void WriteTxnMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) { - SizeBuffer(); dataOffset += 8; dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; dataBuffer[dataOffset++] = (byte)readAttr; diff --git a/AerospikeClient/Command/TxnRoll.cs b/AerospikeClient/Command/TxnRoll.cs index ca6aeed7..3b996168 100644 --- a/AerospikeClient/Command/TxnRoll.cs +++ b/AerospikeClient/Command/TxnRoll.cs @@ -54,8 +54,7 @@ public CommitStatusType 
Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) catch (Exception e2) { // Throw combination of verify and roll exceptions. - - throw OnCommitError(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, e, false); + throw OnCommitError(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, e, false, e2); } if (txn.MonitorMightExist()) @@ -69,8 +68,7 @@ public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) catch (Exception e3) { // Throw combination of verify and close exceptions. - //t.AddSuppressed(t3); - throw OnCommitError(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e, false); + throw OnCommitError(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e, false, e3); } } @@ -120,16 +118,38 @@ public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) return CommitStatusType.OK; } - private AerospikeException.Commit OnCommitError(CommitErrorType error, Exception cause, bool setInDoubt) + private AerospikeException.Commit OnCommitError(CommitErrorType error, Exception cause, bool setInDoubt, Exception innerException = null) { - AerospikeException.Commit aec = new(error, verifyRecords, rollRecords, cause); - - if (cause is AerospikeException) { + AerospikeException.Commit aec; + + if (innerException != null) + { + if (cause == null) + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, innerException); + } + else + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, new[] { cause, innerException }); + } + } + else if (cause != null) + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); + } + else + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords); + } + + if (cause is AerospikeException) + { AerospikeException src = (AerospikeException)cause; aec.Node = src.Node; aec.Policy = src.Policy; aec.Iteration = src.Iteration; - if (setInDoubt) { + if (setInDoubt) + { aec.SetInDoubt(src.InDoubt); } } @@ -166,26 +186,32 @@ public AbortStatusType 
Abort(BatchPolicy rollPolicy) private void Verify(BatchPolicy verifyPolicy) { // Validate record versions in a batch. - HashSet> reads = txn.Reads.ToHashSet>(); - int max = reads.Count; - if (max == 0) - { - return; - } - - BatchRecord[] records = new BatchRecord[max]; - Key[] keys = new Key[max]; - long?[] versions = new long?[max]; int count = 0; + BatchRecord[] records = null; + Key[] keys = null; + long?[] versions = null; - foreach (KeyValuePair entry in reads) + bool actionPerformed = txn.Reads.PerformActionOnEachElement(max => + { + if (max == 0) return false; + + records = new BatchRecord[max]; + keys = new Key[max]; + versions = new long?[max]; + return true; + }, + (key, value, count) => { - Key key = entry.Key; keys[count] = key; records[count] = new BatchRecord(key, false); - versions[count] = entry.Value; - count++; + versions[count] = value; + }); + + if (!actionPerformed) // If no action was performed, there are no elements. Return. + { + return; } + this.verifyRecords = records; BatchStatus status = new(true); diff --git a/AerospikeClient/Main/AerospikeException.cs b/AerospikeClient/Main/AerospikeException.cs index 62d7898f..b02d1bef 100644 --- a/AerospikeClient/Main/AerospikeException.cs +++ b/AerospikeClient/Main/AerospikeException.cs @@ -14,8 +14,10 @@ * License for the specific language governing permissions and limitations under * the License. */ +using System.Collections.ObjectModel; using System.Text; using static Aerospike.Client.AbortStatus; +using static Aerospike.Client.AerospikeException; using static Aerospike.Client.CommitError; namespace Aerospike.Client @@ -604,7 +606,9 @@ public EndOfGRPCStream(int resultCode) } /// - /// Exception thrown when {@link AerospikeClient#commit(com.aerospike.client.Txn)} fails. + /// Exception thrown when fails. + /// Commit Exception has similar behavior to AggregateException. + /// might be populated if mutliple exceptions contribute to the failure. 
/// public sealed class Commit : AerospikeException { @@ -624,20 +628,33 @@ public sealed class Commit : AerospikeException /// public readonly BatchRecord[] RollRecords; + private readonly Exception[] _innerExceptions; // Complete set of exceptions. + public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords) : base(ResultCode.TXN_FAILED, CommitErrorToString(error)) { this.Error = error; this.VerifyRecords = verifyRecords; this.RollRecords = rollRecords; + _innerExceptions = Array.Empty(); + } + + public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords, Exception innerException) + : base(ResultCode.TXN_FAILED, CommitErrorToString(error), innerException) + { + this.Error = error; + this.VerifyRecords = verifyRecords; + this.RollRecords = rollRecords; + _innerExceptions = new[] { innerException }; } - public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords, Exception cause) - : base(ResultCode.TXN_FAILED, CommitErrorToString(error), cause) + public Commit(CommitErrorType error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords, Exception[] innerExceptions) + : base(ResultCode.TXN_FAILED, CommitErrorToString(error), innerExceptions[0]) { this.Error = error; this.VerifyRecords = verifyRecords; this.RollRecords = rollRecords; + _innerExceptions = innerExceptions; } /// @@ -653,6 +670,12 @@ public override string Message return BaseMessage + sb.ToString(); } } + + /// + /// Gets a read-only collection of the instances that caused the + /// current exception. 
+ /// + public ReadOnlyCollection InnerExceptions => new ReadOnlyCollection(_innerExceptions); } private static void RecordsToString(StringBuilder sb, String title, BatchRecord[] records) diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index ff5c182e..367a553f 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -25,8 +25,10 @@ namespace Aerospike.Client /// public class Txn { + private static long randomState = DateTime.UtcNow.Ticks; + public long Id { get; private set; } - public ConcurrentDictionary Reads { get; private set; } + public ConcurrentHashMap Reads { get; private set; } public HashSet Writes { get; private set; } public string Ns { get; private set; } public int Deadline { get; set; } @@ -41,7 +43,7 @@ public class Txn public Txn() { Id = CreateId(); - Reads = new ConcurrentDictionary(); + Reads = new ConcurrentHashMap(); Writes = new HashSet(); Deadline = 0; } @@ -55,7 +57,7 @@ public Txn(int readsCapacity, int writesCapacity) { if (readsCapacity < 16) { - readsCapacity = 16; + readsCapacity = 16; // TODO ask Richard and Brian about this } if (writesCapacity < 16) @@ -64,22 +66,27 @@ public Txn(int readsCapacity, int writesCapacity) } Id = CreateId(); - Reads = new ConcurrentDictionary(100, readsCapacity); + Reads = new ConcurrentHashMap(readsCapacity); Writes = new HashSet(writesCapacity); } + [System.Runtime.CompilerServices.MethodImpl(System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)] + private static long UnsignedRightShift(long n, int s) => n >= 0 ? n >> s : (n >> s) + (2 << ~s); + private static long CreateId() { - // An id of zero is considered invalid. Create random numbers - // in a loop until non-zero is returned. - Random r = new(); - long id = r.NextInt64(); - - while (id == 0) + // xorshift64* doesn't generate zeroes. 
+ long oldState, newState, interlockedResult; + do { - id = r.NextInt64(); - } - return id; + oldState = Interlocked.Read(ref randomState); + newState = oldState; + newState ^= UnsignedRightShift(newState, 12); + newState ^= newState << 25; + newState ^= UnsignedRightShift(newState, 27); + interlockedResult = Interlocked.CompareExchange(ref randomState, newState, oldState); + } while (oldState != interlockedResult); + return newState * 0x2545f4914f6cdd1dL; } /// @@ -130,7 +137,7 @@ public void OnWrite(Key key, long? version, int resultCode) { if (resultCode == ResultCode.OK) { - Reads.Remove(key, out _); + Reads.Remove(key); Writes.Add(key); } } @@ -141,7 +148,7 @@ public void OnWrite(Key key, long? version, int resultCode) /// public void OnWriteInDoubt(Key key) { - Reads.Remove(key, out _); + Reads.Remove(key); Writes.Add(key); } diff --git a/AerospikeClient/Util/ConcurrentHashMap.cs b/AerospikeClient/Util/ConcurrentHashMap.cs new file mode 100644 index 00000000..a0adf5cd --- /dev/null +++ b/AerospikeClient/Util/ConcurrentHashMap.cs @@ -0,0 +1,166 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public class ConcurrentHashMap + { + private readonly ReaderWriterLockSlim _lock = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion); + private readonly Dictionary _dictionary; + + public ConcurrentHashMap() + { + _dictionary = new Dictionary(); + } + + public ConcurrentHashMap(int capacity) + { + _dictionary = new Dictionary(capacity); + } + + public TValue this[TKey key] + { + get + { + try + { + _lock.EnterReadLock(); + return _dictionary[key]; + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + set + { + try + { + _lock.EnterWriteLock(); + _dictionary[key] = value; + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + } + + public bool TryAdd(TKey key, TValue value) + { + _lock.EnterUpgradeableReadLock(); + try + { + if (!_dictionary.ContainsKey(key)) + { + _lock.EnterWriteLock(); + try + { + _dictionary.Add(key, value); + } + finally + { + _lock.ExitWriteLock(); + } + return true; + } + } + finally + { + _lock.ExitUpgradeableReadLock(); + } + return false; + } + + public void Clear() + { + try + { + _lock.EnterWriteLock(); + _dictionary.Clear(); + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + + public bool ContainsKey(TKey key) + { + try + { + _lock.EnterReadLock(); + return _dictionary.ContainsKey(key); + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + + public bool Remove(TKey key) + { + try + { + _lock.EnterWriteLock(); + return _dictionary.Remove(key); + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + + public int Count + { + get + { + try + { + _lock.EnterReadLock(); + return _dictionary.Count; + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + } + + public bool PerformActionOnEachElement(Func initilaize, Action action) + { + _lock.EnterReadLock(); + try + { + if (initilaize is null || initilaize(_dictionary.Count())) + { + int cnt = 0; 
+ foreach (var element in _dictionary) + { + action(element.Key, element.Value, cnt++); + } + return cnt > 0; + } + } + finally + { + _lock.ExitReadLock(); + } + return false; + } + } +} diff --git a/AerospikeDemo/AsyncTransaction.cs b/AerospikeDemo/AsyncTransaction.cs new file mode 100644 index 00000000..afecded2 --- /dev/null +++ b/AerospikeDemo/AsyncTransaction.cs @@ -0,0 +1,289 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using Aerospike.Client; +using System.Threading; + +namespace Aerospike.Demo +{ + public class AsyncTransaction : AsyncExample + { + private bool completed; + + public AsyncTransaction(Console console) : base(console) + { + } + + /// + /// Multi-record transaction. 
+ /// + public override void RunExample(AsyncClient client, Arguments args) + { + completed = false; + + Txn txn = new(); + + console.Info("Begin txn: " + txn.Id); + Put(client, txn, args); + + WaitTillComplete(); + } + + public void Put(AsyncClient client, Txn txn, Arguments args) + { + console.Info("Run put"); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + + Key key = new(args.ns, args.set, 1); + + client.Put(wp, new PutHandler(this, client, key, txn, args), key, new Bin("a", "val1")); + } + + class PutHandler : WriteListener + { + private readonly AsyncTransaction parent; + private readonly AsyncClient client; + private readonly Key key; + private readonly Txn txn; + private readonly Arguments args; + + public PutHandler(AsyncTransaction parent, AsyncClient client, Key key, Txn txn, Arguments args) + { + this.parent = parent; + this.client = client; + this.key = key; + this.txn = txn; + this.args = args; + } + + public void OnSuccess(Key key) + { + parent.PutAnother(client, txn, args); + } + + public void OnFailure(AerospikeException e) + { + parent.console.Error("Failed to write: namespace={0} set={1} key={2} exception={3}", + key.ns, key.setName, key.userKey, e.Message); + parent.Abort(client, txn); + } + }; + + public void PutAnother(AsyncClient client, Txn txn, Arguments args) + { + console.Info("Run another put"); + + var wp = client.WritePolicyDefault; + wp.Txn = txn; + + Key key = new(args.ns, args.set, 2); + + client.Put(wp, new PutAnotherHandler(this, client, key, txn, args), key, new Bin("b", "val2")); + } + + class PutAnotherHandler : WriteListener + { + private readonly AsyncTransaction parent; + private readonly AsyncClient client; + private readonly Key key; + private readonly Txn txn; + private readonly Arguments args; + + public PutAnotherHandler(AsyncTransaction parent, AsyncClient client, Key key, Txn txn, Arguments args) + { + this.parent = parent; + this.client = client; + this.key = key; + this.txn = txn; + this.args = 
args; + } + + public void OnSuccess(Key key) + { + parent.Get(client, txn, args); + } + + public void OnFailure(AerospikeException e) + { + parent.console.Error("Failed to write: namespace={0} set={1} key={2} exception={3}", + key.ns, key.setName, key.userKey, e.Message); + parent.Abort(client, txn); + } + } + + public void Get(AsyncClient client, Txn txn, Arguments args) + { + console.Info("Run get"); + + var p = client.ReadPolicyDefault; + p.Txn = txn; + + Key key = new(args.ns, args.set, 3); + + client.Get(p, new GetHandler(this, client, key, txn, args), key); + } + + class GetHandler : RecordListener + { + private readonly AsyncTransaction parent; + private readonly AsyncClient client; + private readonly Key key; + private readonly Txn txn; + private readonly Arguments args; + + public GetHandler(AsyncTransaction parent, AsyncClient client, Key key, Txn txn, Arguments args) + { + this.parent = parent; + this.client = client; + this.key = key; + this.txn = txn; + this.args = args; + } + + public void OnSuccess(Key key, Record record) + { + parent.Delete(client, txn, args); + } + + public void OnFailure(AerospikeException e) + { + parent.console.Error("Failed to read: namespace={0} set={1} key={2} exception={3}", + key.ns, key.setName, key.userKey, e.Message); + parent.Abort(client, txn); + } + } + + public void Delete(AsyncClient client, Txn txn, Arguments args) + { + console.Info("Run delete"); + + var dp = client.WritePolicyDefault; + dp.Txn = txn; + dp.durableDelete = true; // Required when running delete in a MRT. 
+ + Key key = new(args.ns, args.set, 3); + + client.Delete(dp, new DeleteHandler(this, client, key, txn), key); + } + + class DeleteHandler : DeleteListener + { + private readonly AsyncTransaction parent; + private readonly AsyncClient client; + private readonly Key key; + private readonly Txn txn; + + public DeleteHandler(AsyncTransaction parent, AsyncClient client, Key key, Txn txn) + { + this.parent = parent; + this.client = client; + this.key = key; + this.txn = txn; + } + + public void OnSuccess(Key key, bool existed) + { + parent.Commit(client, txn); + } + + public void OnFailure(AerospikeException e) + { + parent.console.Error("Failed to delete: namespace={0} set={1} key={2} exception={3}", + key.ns, key.setName, key.userKey, e.Message); + parent.Abort(client, txn); + } + + } + + public void Commit(AsyncClient client, Txn txn) + { + console.Info("Run commit"); + + client.Commit(new CommitHandler(this, txn), txn); + } + + class CommitHandler : CommitListener + { + private readonly AsyncTransaction parent; + private readonly Txn txn; + + public CommitHandler(AsyncTransaction parent, Txn txn) + { + this.parent = parent; + this.txn = txn; + } + + public void OnSuccess(CommitStatus.CommitStatusType status) + { + parent.console.Info("Txn committed: " + txn.Id); + parent.NotifyComplete(); + } + + public void OnFailure(AerospikeException.Commit ae) + { + parent.console.Error("Txn commit failed: " + txn.Id); + parent.NotifyComplete(); + } + } + + public void Abort(AsyncClient client, Txn txn) + { + console.Info("Run abort"); + + client.Abort(new AbortHandler(this, txn), txn); + } + + class AbortHandler : AbortListener + { + private readonly AsyncTransaction parent; + private readonly Txn txn; + + public AbortHandler(AsyncTransaction parent, Txn txn) + { + this.parent = parent; + this.txn = txn; + } + + public void OnSuccess(AbortStatus.AbortStatusType status) + { + parent.console.Error("Txn aborted: " + txn.Id); + parent.NotifyComplete(); + } + } + + private void 
WaitTillComplete() + { + lock (this) + { + while (!completed) + { + Monitor.Wait(this); + } + } + } + + private void NotifyComplete() + { + lock (this) + { + completed = true; + Monitor.Pulse(this); + } + } + } +} diff --git a/AerospikeDemo/DemoForm.cs b/AerospikeDemo/DemoForm.cs index 6b31f8dd..65628263 100644 --- a/AerospikeDemo/DemoForm.cs +++ b/AerospikeDemo/DemoForm.cs @@ -73,6 +73,7 @@ private void FormInit() #endif new ExampleTreeNode("Expire", new Expire(console)), new ExampleTreeNode("Touch", new Touch(console)), + new ExampleTreeNode("Transaction", new Transaction(console)), new ExampleTreeNode("Operate", new Operate(console)), new ExampleTreeNode("OperateBit", new OperateBit(console)), new ExampleTreeNode("OperateList", new OperateList(console)), @@ -87,6 +88,7 @@ private void FormInit() new ExampleTreeNode("Async Batch", new AsyncBatch(console)), new ExampleTreeNode("Async Scan", new AsyncScan(console)), new ExampleTreeNode("Async Scan Page", new AsyncScanPage(console)), + new ExampleTreeNode("Async Transaction", new AsyncTransaction(console)), new ExampleTreeNode("Async Query", new AsyncQuery(console)), new ExampleTreeNode("Async UDF", new AsyncUserDefinedFunction(console)), new ExampleTreeNode("List/Map", new ListMap(console)), diff --git a/AerospikeDemo/Transaction.cs b/AerospikeDemo/Transaction.cs new file mode 100644 index 00000000..7c04098c --- /dev/null +++ b/AerospikeDemo/Transaction.cs @@ -0,0 +1,79 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +using Aerospike.Client; +using System; + +namespace Aerospike.Demo +{ + public class Transaction : SyncExample + { + public Transaction(Console console) : base(console) + { + } + + /// + /// Multi-record transaction. + /// + public override void RunExample(IAerospikeClient client, Arguments args) + { + TxnReadWrite(client, args); + } + + private void TxnReadWrite(IAerospikeClient client, Arguments args) + { + Txn txn = new(); + console.Info("Begin txn: " + txn.Id); + + try + { + var wp = client.WritePolicyDefault; + wp.Txn = txn; + + console.Info("Run put"); + Key key1 = new(args.ns, args.set, 1); + client.Put(wp, key1, new Bin("a", "val1")); + + console.Info("Run another put"); + Key key2 = new(args.ns, args.set, 2); + client.Put(wp, key2, new Bin("b", "val2")); + + console.Info("Run get"); + var p = client.ReadPolicyDefault; + p.Txn = txn; + + Key key3 = new(args.ns, args.set, 3); + Record rec = client.Get(p, key3); + + console.Info("Run delete"); + var dp = client.WritePolicyDefault; + dp.Txn = txn; + dp.durableDelete = true; // Required when running delete in a MRT. + client.Delete(dp, key3); + } + catch (Exception) + { + // Abort and rollback MRT (multi-record transaction) if any errors occur. 
+ console.Info("Abort txn: " + txn.Id); + client.Abort(txn); + throw; + } + + console.Info("Commit txn: " + txn.Id); + client.Commit(txn); + } + } +} diff --git a/AerospikeTest/Async/TestAsyncTxn.cs b/AerospikeTest/Async/TestAsyncTxn.cs index c52b7289..3e7a7f6a 100644 --- a/AerospikeTest/Async/TestAsyncTxn.cs +++ b/AerospikeTest/Async/TestAsyncTxn.cs @@ -76,7 +76,7 @@ public void AsyncTxnWriteTwice() [TestMethod] public void AsyncTxnWriteBlock() { - Key key = new(args.ns, args.set, "asyncTxnWriteBlock1111"); + Key key = new(args.ns, args.set, "asyncTxnWriteBlock"); Txn txn = new(); var cmds = new Runner[] @@ -348,6 +348,92 @@ public void AsyncTxnBatchAbort() Execute(cmds); } + [TestMethod] + public void AsyncTxnWriteCommitAbort() + { + Key key = new(args.ns, args.set, "asyncTxnCommitAbort"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new Commit(txn), + new GetExpect(null, key, "val2"), + new Abort(txn, AbortStatus.AbortStatusType.ALREADY_ATTEMPTED) + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteReadTwoTxn() + { + Key key = new(args.ns, args.set, "asyncTxnWriteReadTwoTxn"); + Txn txn1 = new(); + Txn txn2 = new(); + + var cmds = new Runner[] + { + new Put(null, key, "val1"), + new GetExpect(txn1, key, "val1"), + new GetExpect(txn2, key, "val1"), + new Commit(txn1), + new Commit(txn2), + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnLUTCommit() // Test Case 38 + { + Key key1 = new(args.ns, args.set, "asyncTxnLUTCommit1"); + Key key2 = new(args.ns, args.set, "asyncTxnLUTCommit2"); + Key key3 = new(args.ns, args.set, "asyncTxnLUTCommit3"); + Txn txn = new(); // T1 + + var cmds = new Runner[] + { + new Delete(null, key1), // Prep + new Delete(null, key2), + new Delete(null, key3), + new Put(txn, key1, "val1"), // T1 + new GetExpect(txn, key1, "val1", 1), // T2 + new Put(txn, key1, "val11"), // T3 + new GetExpect(txn, key1, "val11", 2), // T4 + new Put(null, 
key2, "val1"), // T5 + new GetExpect(txn, key2, "val1", 1), // T6 + new Put(txn, key2, "val11"), // T7 + new GetExpect(txn, key2, "val11", 2), // T8 + new Put(txn, key3, "val1"), // T9 + new GetExpect(txn, key3, "val1", 1), // T10 + new Commit(txn), // T11 + new GetExpect(txn, key1, "val11", 3), // T12 + new GetExpect(txn, key2, "val11", 3), + new GetExpect(txn, key3, "val1", 2) + }; + + Execute(cmds); + } + + [TestMethod] + public void AsyncTxnWriteAfterCommit() + { + Key key = new(args.ns, args.set, "asyncTxnWriteAfter"); + Txn txn = new(); + + var cmds = new Runner[] + { + new Put(txn, key, "val1"), + new Commit(txn), + new Sleep(1000), + new Put(txn, key, "val1", ResultCode.MRT_EXPIRED), + }; + + Execute(cmds); + } + private void Execute(Runner[] cmdArray) { Cmds a = new(this, cmdArray); @@ -436,64 +522,102 @@ public void OnFailure(Exception e, int expectedResult) public class Commit : Runner { private readonly Txn txn; + private readonly bool throwsCommitException; public Commit(Txn txn) { this.txn = txn; + this.throwsCommitException = false; + } + + public Commit(Txn txn, bool throwsCommitException) + { + this.txn = txn; + this.throwsCommitException = throwsCommitException; } public void Run(TestAsyncTxn parent, Listener listener) { - client.Commit(new CommitHandler(listener), txn); + client.Commit(new CommitHandler(listener, throwsCommitException), txn); } private class CommitHandler : CommitListener { private readonly Listener listener; + private readonly bool throwsCommitException; - public CommitHandler(Listener listener) + public CommitHandler(Listener listener, bool throwsCommitException) { this.listener = listener; + this.throwsCommitException = throwsCommitException; } public void OnSuccess(CommitStatusType status) { - listener.OnSuccess(); + if (status == CommitStatusType.OK) + { + listener.OnSuccess(); + return; + } + listener.OnFailure(); } public void OnFailure(AerospikeException.Commit e) { + if (throwsCommitException) + { + listener.OnSuccess(); 
+ return; + } + listener.OnFailure(e); } } } + + public class Abort : Runner { private readonly Txn txn; + private readonly AbortStatusType status; public Abort(Txn txn) { this.txn = txn; + this.status = AbortStatusType.OK; + } + + public Abort(Txn txn, AbortStatusType abortStatus) + { + this.txn = txn; + this.status = abortStatus; } public void Run(TestAsyncTxn parent, Listener listener) { - client.Abort(new AbortHandler(listener), txn); + client.Abort(new AbortHandler(listener, status), txn); } private class AbortHandler : AbortListener { private readonly Listener listener; + private readonly AbortStatusType status; - public AbortHandler(Listener listener) + public AbortHandler(Listener listener, AbortStatusType status) { this.listener = listener; + this.status = status; } public void OnSuccess(AbortStatusType status) { - listener.OnSuccess(); + if (status == this.status) + { + listener.OnSuccess(); + return; + } + listener.OnFailure(); } } } @@ -573,12 +697,22 @@ public class GetExpect : Runner private readonly Txn txn; private readonly Key key; private readonly string expect; + private readonly int generation; public GetExpect(Txn txn, Key key, string expect) { this.txn = txn; this.key = key; this.expect = expect; + generation = 0; // Do not check generation + } + + public GetExpect(Txn txn, Key key, string expect, int generation) + { + this.txn = txn; + this.key = key; + this.expect = expect; + this.generation = generation; } public void Run(TestAsyncTxn parent, Listener listener) @@ -590,7 +724,7 @@ public void Run(TestAsyncTxn parent, Listener listener) p = client.ReadPolicyDefault; p.Txn = txn; } - client.Get(p, new GetExpectHandler(parent, listener, expect), key); + client.Get(p, new GetExpectHandler(parent, listener, expect, generation), key); } private class GetExpectHandler : RecordListener @@ -598,16 +732,26 @@ private class GetExpectHandler : RecordListener private readonly TestAsyncTxn parent; private readonly Listener listener; private string 
expect; + private int generation; - public GetExpectHandler(TestAsyncTxn parent, Listener listener, string expect) + public GetExpectHandler(TestAsyncTxn parent, Listener listener, string expect, int generation) { this.parent = parent; this.listener = listener; this.expect = expect; + this.generation = generation; } public void OnSuccess(Key key, Record record) { + if (generation != 0) + { + if (generation != record.generation) + { + listener.OnFailure(new AssertFailedException("Expected generation: " + generation + " but got: " + record.generation)); + } + } + if (expect != null) { if (parent.AssertBinEqual(key, record, binName, expect)) @@ -981,6 +1125,22 @@ public void OnFailure(AerospikeException e) } } + public class Sleep : Runner + { + private readonly int sleepMillis; + + public Sleep(int sleepMillis) + { + this.sleepMillis = sleepMillis; + } + + public void Run(TestAsyncTxn parent, Listener listener) + { + Util.Sleep(sleepMillis); + parent.NotifyCompleted(); + } + } + public interface Runner { void Run(TestAsyncTxn parent, Listener listener); diff --git a/AerospikeTest/Sync/Basic/TestTxn.cs b/AerospikeTest/Sync/Basic/TestTxn.cs index f49bb36e..61306d5f 100644 --- a/AerospikeTest/Sync/Basic/TestTxn.cs +++ b/AerospikeTest/Sync/Basic/TestTxn.cs @@ -18,7 +18,6 @@ using Aerospike.Client; using System.Reflection; using System.Text; -using Microsoft.AspNetCore.DataProtection.KeyManagement; namespace Aerospike.Test { @@ -41,7 +40,7 @@ public static void Prepare(TestContext testContext) [TestMethod] public void TxnWrite() { - Key key = new(args.ns, args.set, "mrtkey111"); + Key key = new(args.ns, args.set, "mrtkey1"); client.Put(null, key, new Bin(binName, "val1")); @@ -78,7 +77,7 @@ public void TxnWriteTwice() [TestMethod] public void TxnWriteConflict() { - Key key = new(args.ns, args.set, "mrtkey21"); + Key key = new(args.ns, args.set, "mrtkey021"); Txn txn1 = new(); Txn txn2 = new(); @@ -93,6 +92,7 @@ public void TxnWriteConflict() try { client.Put(wp2, key, new 
Bin(binName, "val2")); + throw new AerospikeException("Unexpected success"); } catch (AerospikeException ae) { @@ -399,7 +399,7 @@ public void TxnBatch() if (!bresults.status) { - StringBuilder sb = new StringBuilder(); + StringBuilder sb = new(); sb.Append("Batch failed:"); sb.Append(System.Environment.NewLine); @@ -453,7 +453,7 @@ public void TxnBatchAbort() if (!bresults.status) { - StringBuilder sb = new StringBuilder(); + StringBuilder sb = new(); sb.Append("Batch failed:"); sb.Append(System.Environment.NewLine); @@ -533,46 +533,50 @@ record = client.Get(rp2, key); } [TestMethod] - public void TxnLUTCommit() + public void TxnLUTCommit() // Test Case 38 { - Txn txn = new(); + Txn txn = new(); // T0 Key key1 = new(args.ns, args.set, "mrtkey17"); Key key2 = new(args.ns, args.set, "mrtkey18"); Key key3 = new(args.ns, args.set, "mrtkey19"); + client.Delete(null, key1); + client.Delete(null, key2); + client.Delete(null, key3); + var wp = client.WritePolicyDefault; wp.Txn = txn; - client.Put(wp, key1, new Bin(binName, "val1")); + client.Put(wp, key1, new Bin(binName, "val1")); // T1 var p = client.ReadPolicyDefault; p.Txn = txn; - var record = client.Get(p, key1); + var record = client.Get(p, key1); // T2 Assert.AreEqual(1, record.generation); - client.Put(wp, key1, new Bin(binName, "val11")); + client.Put(wp, key1, new Bin(binName, "val11")); // T3 - record = client.Get(p, key1); + record = client.Get(p, key1); // T4 Assert.AreEqual(2, record.generation); - client.Put(null, key2, new Bin(binName, "val1")); + client.Put(null, key2, new Bin(binName, "val1")); // T5 - record = client.Get(p, key2); + record = client.Get(p, key2); // T6 Assert.AreEqual(1, record.generation); - client.Put(wp, key2, new Bin(binName, "val11")); + client.Put(wp, key2, new Bin(binName, "val11")); // T7 - record = client.Get(p, key2); + record = client.Get(p, key2); // T8 Assert.AreEqual(2, record.generation); - client.Put(wp, key3, new Bin(binName, "val1")); + client.Put(wp, key3, new 
Bin(binName, "val1")); // T9 - record = client.Get(p, key3); + record = client.Get(p, key3); // T10 Assert.AreEqual(1, record.generation); - client.Commit(txn); + client.Commit(txn); // T11 - record = client.Get(null, key1); + record = client.Get(null, key1); // T12 Assert.AreEqual(3, record.generation); record = client.Get(null, key2); Assert.AreEqual(3, record.generation); @@ -580,6 +584,117 @@ record = client.Get(null, key3); Assert.AreEqual(2, record.generation); } + [TestMethod] + public void TxnLUTAbort() // Test Case 39 + { + client.Truncate(null, args.ns, args.set, DateTime.Now); + + Txn txn = new(); // T0 + + Key key1 = new(args.ns, args.set, "mrtkey20"); + Key key2 = new(args.ns, args.set, "mrtkey21"); + Key key3 = new(args.ns, args.set, "mrtkey22"); + + //client.Delete(null, key1); + //client.Delete(null, key2); + //client.Delete(null, key3); + + client.Put(null, key1, new Bin(binName, "val1")); // T1 + + var p = client.ReadPolicyDefault; + p.Txn = txn; + var record = client.Get(p, key1); // T2 + Assert.AreEqual(1, record.generation); + + var binR2O = new Bin(binName, "val2"); + client.Put(null, key2, binR2O); // T3 + record = client.Get(p, key2); // T4 + Assert.AreEqual(1, record.generation); + + var wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key2, new Bin(binName, "val11")); // T5 + + record = client.Get(p, key2); + Assert.AreEqual(2, record.generation); + + record = client.Get(null, key2); // T6 + Assert.AreEqual(1, record.generation); + + client.Put(wp, key3, new Bin(binName, "val3")); // T7 + record = client.Get(p, key3); + Assert.AreEqual(1, record.generation); + + var binR1UO = new Bin(binName, "val1"); // T8 + client.Put(null, key1, binR1UO); + record = client.Get(null, key1); + Assert.AreEqual(2, record.generation); + + try + { + client.Put(wp, key1, new Bin(binName, "val1111")); // T9 + record = client.Get(p, key1); + Assert.AreEqual(2, record.generation); + throw new AerospikeException("Unexpected success"); + } + catch 
(AerospikeException ae) + { + if (ae.Result != ResultCode.MRT_CONFLICT) + { + throw; + } + } + + try + { + client.Commit(txn); // T10 + } + catch (AerospikeException.Commit ae) + { + + } + + record = client.Get(null, key1); // T11 + Assert.AreEqual(2, record.generation); + AssertBinEqual(key1, record, binR1UO); + record = client.Get(null, key2); + Assert.AreEqual(3, record.generation); + AssertBinEqual(key2, record, binR2O); + record = client.Get(null, key3); + Assert.IsNull(record); + + // Cleanup + client.Abort(txn); + } + + [TestMethod] + public void TxnWriteAfterCommit() + { + Key key = new(args.ns, args.set, "mrtkey23"); + + Txn txn = new(); + + WritePolicy wp = client.WritePolicyDefault; + wp.Txn = txn; + client.Put(wp, key, new Bin(binName, "val1")); + + client.Commit(txn); + + try + { + client.Put(wp, key, new Bin(binName, "val1")); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException ae) + { + if (ae.Result != ResultCode.MRT_EXPIRED) + { + throw; + } + } + + } + [TestMethod] public void TxnInvalidNamespace() { @@ -605,7 +720,7 @@ public void TxnInvalidNamespace() } } - private void AssertBatchEqual(Key[] keys, Record[] recs, int expected) + private static void AssertBatchEqual(Key[] keys, Record[] recs, int expected) { for (int i = 0; i < keys.Length; i++) { From fe76abd133fc5e1294d45ccaf0cae46e1de03fe8 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Wed, 23 Oct 2024 13:24:25 -0600 Subject: [PATCH 08/41] Add option to force single node, track mrt state --- AerospikeClient/Async/AsyncClient.cs | 54 +++++++++++---- AerospikeClient/Async/AsyncCluster.cs | 4 +- AerospikeClient/Async/AsyncCommand.cs | 4 +- AerospikeClient/Async/AsyncConnectionTls.cs | 2 +- .../Async/AsyncTxnMarkRollForward.cs | 4 +- AerospikeClient/Async/AsyncTxnRoll.cs | 35 +++++++--- AerospikeClient/Cluster/Cluster.cs | 68 +++++++++++++++++++ AerospikeClient/Cluster/ConnectionRecover.cs | 2 +- AerospikeClient/Cluster/Node.cs | 2 +- 
AerospikeClient/Command/SyncCommand.cs | 2 +- AerospikeClient/Command/TxnMarkRollForward.cs | 4 +- AerospikeClient/Command/TxnRoll.cs | 20 ++++-- AerospikeClient/Exp/Exp.cs | 4 +- AerospikeClient/Main/AbortStatus.cs | 6 +- AerospikeClient/Main/AerospikeClient.cs | 47 +++++++++---- AerospikeClient/Main/CommitStatus.cs | 6 +- AerospikeClient/Main/ResultCode.cs | 20 +++++- AerospikeClient/Main/Txn.cs | 27 ++++---- AerospikeClient/Policy/ClientPolicy.cs | 13 +++- AerospikeClient/Policy/QueryPolicy.cs | 2 +- AerospikeClient/Query/PartitionTracker.cs | 6 +- AerospikeTest/Async/TestAsyncTxn.cs | 2 +- AerospikeTest/Sync/Basic/TestTxn.cs | 2 +- 23 files changed, 255 insertions(+), 81 deletions(-) diff --git a/AerospikeClient/Async/AsyncClient.cs b/AerospikeClient/Async/AsyncClient.cs index a67587bd..f3be119f 100644 --- a/AerospikeClient/Async/AsyncClient.cs +++ b/AerospikeClient/Async/AsyncClient.cs @@ -59,7 +59,7 @@ public class AsyncClient : AerospikeClient, IAsyncClient /// /// /// If the connection succeeds, the client is ready to process database requests. - /// If the connection fails, the cluster will remain in a disconnected state + /// If the connection fails, the cluster will remain in a disconnected State /// until the server is activated. /// /// @@ -83,7 +83,7 @@ public AsyncClient(string hostname, int port) /// /// If the connection succeeds, the client is ready to process database requests. /// If the connection fails and the policy's failOnInvalidHosts is true, a connection - /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected State /// until the server is activated. /// /// @@ -112,7 +112,7 @@ public AsyncClient(AsyncClientPolicy policy, string hostname, int port) /// /// If one connection succeeds, the client is ready to process database requests. 
/// If all connections fail and the policy's failIfNotConnected is true, a connection - /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected State /// until the server is activated. /// /// @@ -150,15 +150,30 @@ public AsyncClient(AsyncClientPolicy policy, params Host[] hosts) /// multi-record transaction public void Commit(CommitListener listener, Txn txn) { - if (!txn.SetRollAttempted()) - { - listener.OnSuccess(CommitStatus.CommitStatusType.ALREADY_ATTEMPTED); - } - AsyncTxnRoll atr = new( cluster, txnVerifyPolicyDefault, txnRollPolicyDefault, txn ); - atr.Commit(listener); + + switch (txn.State) + { + default: + case Txn.TxnState.OPEN: + atr.Verify(listener); + break; + + case Txn.TxnState.VERIFIED: + atr.Verify(listener); + break; + + case Txn.TxnState.COMMITTED: + listener.OnSuccess(CommitStatus.CommitStatusType.ALREADY_COMMITTED); + break; + + case Txn.TxnState.ABORTED: + listener.OnSuccess(CommitStatus.CommitStatusType.ALREADY_ABORTED); + break; + + } } /// @@ -187,13 +202,24 @@ public Task Commit(Txn txn, CancellationToken token) /// multi-record transaction public void Abort(AbortListener listener, Txn txn) { - if (!txn.SetRollAttempted()) + AsyncTxnRoll atr = new(cluster, null, txnRollPolicyDefault, txn); + + switch (txn.State) { - listener.OnSuccess(AbortStatus.AbortStatusType.ALREADY_ATTEMPTED); - } + default: + case Txn.TxnState.OPEN: + case Txn.TxnState.VERIFIED: + atr.Abort(listener); + break; - AsyncTxnRoll atr = new(cluster, null, txnRollPolicyDefault, txn); - atr.Abort(listener); + case Txn.TxnState.COMMITTED: + listener.OnSuccess(AbortStatus.AbortStatusType.ALREADY_COMMITTED); + break; + + case Txn.TxnState.ABORTED: + listener.OnSuccess(AbortStatus.AbortStatusType.ALREADY_ABORTED); + break; + } } public Task Abort(Txn txn, CancellationToken token) diff --git a/AerospikeClient/Async/AsyncCluster.cs 
b/AerospikeClient/Async/AsyncCluster.cs index 213a2202..43190a16 100644 --- a/AerospikeClient/Async/AsyncCluster.cs +++ b/AerospikeClient/Async/AsyncCluster.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -71,7 +71,7 @@ public AsyncCluster(AsyncClientPolicy policy, Host[] hosts) throw new AerospikeException(ResultCode.PARAMETER_ERROR, "Unsupported MaxCommandAction value: " + policy.asyncMaxCommandAction.ToString()); } - InitTendThread(policy.failIfNotConnected); + StartTendThread(policy); } protected internal override Node CreateNode(NodeValidator nv, bool createMinConn) diff --git a/AerospikeClient/Async/AsyncCommand.cs b/AerospikeClient/Async/AsyncCommand.cs index a282f3a8..3221ac14 100644 --- a/AerospikeClient/Async/AsyncCommand.cs +++ b/AerospikeClient/Async/AsyncCommand.cs @@ -180,14 +180,14 @@ private void ExecuteCore() { if (totalTimeout > 0) { - // Timeout already added in Execute(). Verify state. + // Timeout already added in Execute(). Verify State. if (state != IN_PROGRESS) { // Total timeout might have occurred if command was in the delay queue. // Socket timeout should not be possible for commands in the delay queue. if (state != FAIL_TOTAL_TIMEOUT) { - Log.Error(cluster.context, "Unexpected state at async command start: " + state); + Log.Error(cluster.context, "Unexpected State at async command start: " + state); } // User has already been notified of the total timeout. Release buffer and // return for all error states. 
diff --git a/AerospikeClient/Async/AsyncConnectionTls.cs b/AerospikeClient/Async/AsyncConnectionTls.cs index 1fa32aa1..80819d41 100644 --- a/AerospikeClient/Async/AsyncConnectionTls.cs +++ b/AerospikeClient/Async/AsyncConnectionTls.cs @@ -202,7 +202,7 @@ private void ReceiveEvent(IAsyncResult result) { // Do not call command completed methods because that can result in a new // Send()/Receive() call before the current sync BeginRead() has completed. - // Instead, set the state which will be handled after BeginRead() is done. + // Instead, set the State which will be handled after BeginRead() is done. try { int received = sslStream.EndRead(result); diff --git a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs index f4466307..c050de4e 100644 --- a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs +++ b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs @@ -53,9 +53,9 @@ protected internal override bool ParseResult() ParseHeader(); ParseFields(policy.Txn, Key, true); - // BIN_EXISTS_ERROR is considered a success because it means a previous attempt already + // MRT_COMMITTED is considered a success because it means a previous attempt already // succeeded in notifying the server that the MRT will be rolled forward. 
- if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) + if (resultCode == ResultCode.OK || resultCode == ResultCode.MRT_COMMITTED) { return true; } diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs index d88f629b..3f2119fb 100644 --- a/AerospikeClient/Async/AsyncTxnRoll.cs +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -50,15 +50,35 @@ Txn txn this.txnKey = TxnMonitor.GetTxnMonitorKey(txn); } - public void Commit(CommitListener listener) + public void Verify(CommitListener listener) { commitListener = listener; Verify(new VerifyListener(this)); } + public void Commit(CommitListener listener) + { + commitListener = listener; + Commit(); + } + + private void Commit() + { + if (txn.MonitorExists()) + { + MarkRollForward(); + } + else + { + txn.State = Txn.TxnState.COMMITTED; + CloseOnCommit(true); + } + } + public void Abort(AbortListener listener) { abortListener = listener; + txn.State = Txn.TxnState.ABORTED; Roll(new RollListener(this), Command.INFO4_MRT_ROLL_BACK); } @@ -306,18 +326,12 @@ public void OnSuccess(BatchRecord[] records, bool status) if (status) { - if (command.txn.MonitorExists()) - { - command.MarkRollForward(); - } - else - { - // There is nothing to roll-forward. 
- command.CloseOnCommit(true); - } + command.txn.State = Txn.TxnState.VERIFIED; + command.Commit(); } else { + command.txn.State = Txn.TxnState.ABORTED; command.RollBack(); } } @@ -371,6 +385,7 @@ public MarkRollForwardListener(AsyncTxnRoll command) public void OnSuccess(Key key) { + command.txn.State = Txn.TxnState.VERIFIED; command.RollForward(); } diff --git a/AerospikeClient/Cluster/Cluster.cs b/AerospikeClient/Cluster/Cluster.cs index 2e96e127..190c6b78 100644 --- a/AerospikeClient/Cluster/Cluster.cs +++ b/AerospikeClient/Cluster/Cluster.cs @@ -271,6 +271,74 @@ public Cluster(ClientPolicy policy, Host[] hosts) cancelToken = cancel.Token; } + public void StartTendThread(ClientPolicy policy) + { + if (policy.forceSingleNode) + { + // Communicate with the first seed node only. + // Do not run cluster tend thread. + try + { + ForceSingleNode(); + } + catch (Exception) + { + Close(); + throw; + } + } + else + { + InitTendThread(policy.failIfNotConnected); + } + } + + public void ForceSingleNode() + { + // Initialize tendThread, but do not start it. + tendValid = true; + tendThread = new Thread(new ThreadStart(this.Run)); + + // Validate first seed. + Host seed = seeds[0]; + NodeValidator nv = new(); + Node node = null; + + try + { + node = nv.SeedNode(this, seed, null); + } + catch (Exception e) + { + throw new AerospikeException("Seed " + seed + " failed: " + e.Message, e); + } + + node.CreateMinConnections(); + + // Add seed node to nodes. + Dictionary nodesToAdd = new(1); + nodesToAdd[node.Name] = node; + AddNodes(nodesToAdd); + + // Initialize partitionMaps. + Peers peers = new(nodes.Length + 16); + node.RefreshPartitions(peers); + + // Set partition maps for all namespaces to point to same node. 
+ foreach (Partitions partitions in partitionMap.Values) + { + foreach (Node[] nodeArray in partitions.replicas) + { + int max = nodeArray.Length; + + for (int i = 0; i < max; i++) + { + nodeArray[i] = node; + } + } + } + } + public virtual void InitTendThread(bool failIfNotConnected) { // Tend cluster until all nodes identified. diff --git a/AerospikeClient/Cluster/ConnectionRecover.cs b/AerospikeClient/Cluster/ConnectionRecover.cs index 4127e1cc..5d1ffe1e 100644 --- a/AerospikeClient/Cluster/ConnectionRecover.cs +++ b/AerospikeClient/Cluster/ConnectionRecover.cs @@ -284,7 +284,7 @@ private void ParseProto(byte[] buf, int bytesRead) if (compressed) { // Do not recover connections with compressed data because that would - // require saving large buffers with associated state and performing decompression + // require saving large buffers with associated State and performing decompression // just to drain the connection. throw new AerospikeException("Recovering connections with compressed multi-record data is not supported"); } diff --git a/AerospikeClient/Cluster/Node.cs b/AerospikeClient/Cluster/Node.cs index b4798a76..d9887d30 100644 --- a/AerospikeClient/Cluster/Node.cs +++ b/AerospikeClient/Cluster/Node.cs @@ -740,7 +740,7 @@ public Connection GetConnection(int timeoutMillis, int timeoutDelay) { if (timeoutDelay > 0) { - // The connection state is always STATE_READ_AUTH_HEADER here which does not reference + // The connection State is always STATE_READ_AUTH_HEADER here which does not reference // isSingle, so just pass in true for isSingle in ConnectionRecover. 
cluster.RecoverConnection(new ConnectionRecover(conn, this, timeoutDelay, crt, true)); conn = null; diff --git a/AerospikeClient/Command/SyncCommand.cs b/AerospikeClient/Command/SyncCommand.cs index 7e16718b..2ea510c2 100644 --- a/AerospikeClient/Command/SyncCommand.cs +++ b/AerospikeClient/Command/SyncCommand.cs @@ -241,7 +241,7 @@ public void ExecuteCommand() } catch (AerospikeException.Backoff be) { - // Node is in backoff state. Retry, hopefully on another node. + // Node is in backoff State. Retry, hopefully on another node. exception = be; isClientTimeout = false; node.AddError(); diff --git a/AerospikeClient/Command/TxnMarkRollForward.cs b/AerospikeClient/Command/TxnMarkRollForward.cs index 01e9e347..7fd46b18 100644 --- a/AerospikeClient/Command/TxnMarkRollForward.cs +++ b/AerospikeClient/Command/TxnMarkRollForward.cs @@ -34,9 +34,9 @@ protected internal override void ParseResult(IConnection conn) ParseHeader(conn); ParseFields(policy.Txn, key, true); - // BIN_EXISTS_ERROR is considered a success because it means a previous attempt already + // MRT_COMMITTED is considered a success because it means a previous attempt already // succeeded in notifying the server that the MRT will be rolled forward. 
- if (resultCode == ResultCode.OK || resultCode == ResultCode.BIN_EXISTS_ERROR) + if (resultCode == ResultCode.OK || resultCode == ResultCode.MRT_COMMITTED) { return; } diff --git a/AerospikeClient/Command/TxnRoll.cs b/AerospikeClient/Command/TxnRoll.cs index 3b996168..4f411281 100644 --- a/AerospikeClient/Command/TxnRoll.cs +++ b/AerospikeClient/Command/TxnRoll.cs @@ -34,7 +34,7 @@ public TxnRoll(Cluster cluster, Txn txn) this.txn = txn; } - public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) + public void Verify(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) { WritePolicy writePolicy; Key txnKey; @@ -42,11 +42,12 @@ public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) try { // Verify read versions in batch. - Verify(verifyPolicy); + VerifyRecordVersions(verifyPolicy); } catch (Exception e) { // Verify failed. Abort. + txn.State = Txn.TxnState.ABORTED; try { Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); @@ -76,8 +77,13 @@ public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) throw OnCommitError(CommitErrorType.VERIFY_FAIL, e, false); } - writePolicy = new WritePolicy(rollPolicy); - txnKey = TxnMonitor.GetTxnMonitorKey(txn); + txn.State = Txn.TxnState.VERIFIED; + } + + public CommitStatusType Commit(BatchPolicy rollPolicy) + { + var writePolicy = new WritePolicy(rollPolicy); + var txnKey = TxnMonitor.GetTxnMonitorKey(txn); if (txn.MonitorExists()) { @@ -91,6 +97,8 @@ public CommitStatusType Commit(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) throw OnCommitError(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, e, true); } + txn.State = Txn.TxnState.COMMITTED; + // Roll-forward writes in batch. 
try { @@ -158,6 +166,8 @@ private AerospikeException.Commit OnCommitError(CommitErrorType error, Exception public AbortStatusType Abort(BatchPolicy rollPolicy) { + txn.State = Txn.TxnState.ABORTED; + try { Roll(rollPolicy, Command.INFO4_MRT_ROLL_BACK); @@ -183,7 +193,7 @@ public AbortStatusType Abort(BatchPolicy rollPolicy) return AbortStatusType.OK; } - private void Verify(BatchPolicy verifyPolicy) + private void VerifyRecordVersions(BatchPolicy verifyPolicy) { // Validate record versions in a batch. int count = 0; diff --git a/AerospikeClient/Exp/Exp.cs b/AerospikeClient/Exp/Exp.cs index 692077b6..dd3e0de9 100644 --- a/AerospikeClient/Exp/Exp.cs +++ b/AerospikeClient/Exp/Exp.cs @@ -408,12 +408,12 @@ public static Exp TTL() } /// - /// Create expression that returns if record has been deleted and is still in tombstone state. + /// Create expression that returns if record has been deleted and is still in tombstone State. /// This expression usually evaluates quickly because record meta data is cached in memory. /// /// /// - /// // Deleted records that are in tombstone state. + /// // Deleted records that are in tombstone State. /// Exp.isTombstone() /// /// diff --git a/AerospikeClient/Main/AbortStatus.cs b/AerospikeClient/Main/AbortStatus.cs index 3bbe81e1..94f2dee2 100644 --- a/AerospikeClient/Main/AbortStatus.cs +++ b/AerospikeClient/Main/AbortStatus.cs @@ -25,7 +25,8 @@ public static class AbortStatus public enum AbortStatusType { OK, - ALREADY_ATTEMPTED, + ALREADY_COMMITTED, + ALREADY_ABORTED, ROLL_BACK_ABANDONED, CLOSE_ABANDONED } @@ -35,7 +36,8 @@ public static string AbortErrorToString(AbortStatusType status) return status switch { AbortStatusType.OK => "Abort succeeded.", - AbortStatusType.ALREADY_ATTEMPTED => "Abort or commit already attempted.", + AbortStatusType.ALREADY_COMMITTED => "Already committed.", + AbortStatusType.ALREADY_ABORTED => "Already aborted.", AbortStatusType.ROLL_BACK_ABANDONED => "MRT client roll back abandoned. 
Server will eventually abort the MRT.", AbortStatusType.CLOSE_ABANDONED => "MRT has been rolled back, but MRT client close was abandoned. Server will eventually close the MRT.", _ => "Unexpected AbortStatusType." diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index f85c83bd..a08341ca 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -126,7 +126,7 @@ public class AerospikeClient : IDisposable, IAerospikeClient /// /// /// If the connection succeeds, the client is ready to process database requests. - /// If the connection fails, the cluster will remain in a disconnected state + /// If the connection fails, the cluster will remain in a disconnected State /// until the server is activated. /// /// @@ -150,7 +150,7 @@ public AerospikeClient(string hostname, int port) /// /// If the connection succeeds, the client is ready to process database requests. /// If the connection fails and the policy's failOnInvalidHosts is true, a connection - /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected State /// until the server is activated. /// /// @@ -179,7 +179,7 @@ public AerospikeClient(ClientPolicy policy, string hostname, int port) /// /// If one connection succeeds, the client is ready to process database requests. /// If all connections fail and the policy's failIfNotConnected is true, a connection - /// exception will be thrown. Otherwise, the cluster will remain in a disconnected state + /// exception will be thrown. Otherwise, the cluster will remain in a disconnected State /// until the server is activated. 
/// /// @@ -207,7 +207,7 @@ public AerospikeClient(ClientPolicy policy, params Host[] hosts) this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); cluster = new Cluster(policy, hosts); - cluster.InitTendThread(policy.failIfNotConnected); + cluster.StartTendThread(policy); } /// @@ -474,13 +474,24 @@ public ClusterStats GetClusterStats() /// multi-record transaction public CommitStatus.CommitStatusType Commit(Txn txn) { - if (!txn.SetRollAttempted()) + TxnRoll tr = new(cluster, txn); + + switch (txn.State) { - return CommitStatus.CommitStatusType.ALREADY_ATTEMPTED; + default: + case Txn.TxnState.OPEN: + tr.Verify(txnVerifyPolicyDefault, txnRollPolicyDefault); + return tr.Commit(txnRollPolicyDefault); + + case Txn.TxnState.VERIFIED: + return tr.Commit(txnRollPolicyDefault); + + case Txn.TxnState.COMMITTED: + return CommitStatus.CommitStatusType.ALREADY_COMMITTED; + + case Txn.TxnState.ABORTED: + return CommitStatus.CommitStatusType.ALREADY_ABORTED; } - - TxnRoll tr = new(cluster, txn); - return tr.Commit(txnVerifyPolicyDefault, txnRollPolicyDefault); } /// @@ -492,13 +503,21 @@ public CommitStatus.CommitStatusType Commit(Txn txn) /// multi-record transaction public AbortStatus.AbortStatusType Abort(Txn txn) { - if (!txn.SetRollAttempted()) + TxnRoll tr = new(cluster, txn); + + switch (txn.State) { - return AbortStatus.AbortStatusType.ALREADY_ATTEMPTED; - } + default: + case Txn.TxnState.OPEN: + case Txn.TxnState.VERIFIED: + return tr.Abort(txnRollPolicyDefault); - TxnRoll tr = new(cluster, txn); - return tr.Abort(txnRollPolicyDefault); + case Txn.TxnState.COMMITTED: + return AbortStatus.AbortStatusType.ALREADY_COMMITTED; + + case Txn.TxnState.ABORTED: + return AbortStatus.AbortStatusType.ALREADY_ABORTED; + } } //------------------------------------------------------- diff --git a/AerospikeClient/Main/CommitStatus.cs b/AerospikeClient/Main/CommitStatus.cs index 31ba95ad..50c9a7d2 100644 --- a/AerospikeClient/Main/CommitStatus.cs +++ 
b/AerospikeClient/Main/CommitStatus.cs @@ -27,7 +27,8 @@ public static class CommitStatus public enum CommitStatusType { OK, - ALREADY_ATTEMPTED, + ALREADY_COMMITTED, + ALREADY_ABORTED, ROLL_FORWARD_ABANDONED, CLOSE_ABANDONED } @@ -37,7 +38,8 @@ public static string CommitErrorToString(CommitStatusType status) return status switch { CommitStatusType.OK => "Commit succeeded.", - CommitStatusType.ALREADY_ATTEMPTED => "Commit or abort already attempted.", + CommitStatusType.ALREADY_COMMITTED => "Already committed.", + CommitStatusType.ALREADY_ABORTED => "Already aborted.", CommitStatusType.ROLL_FORWARD_ABANDONED => "MRT client roll forward abandoned. Server will eventually commit the MRT.", CommitStatusType.CLOSE_ABANDONED => "MRT has been rolled forward, but MRT client close was abandoned. Server will eventually close the MRT.", _ => "Unexpected AbortStatusType." diff --git a/AerospikeClient/Main/ResultCode.cs b/AerospikeClient/Main/ResultCode.cs index 3a93b155..6cdeb714 100644 --- a/AerospikeClient/Main/ResultCode.cs +++ b/AerospikeClient/Main/ResultCode.cs @@ -306,6 +306,18 @@ public sealed class ResultCode /// public const int XDR_KEY_BUSY = 32; + /// + /// MRT was already committed. + /// Value: 33 + /// + public const int MRT_COMMITTED = 33; + + /// + /// MRT was already aborted. + /// Value: 34 + /// + public const int MRT_ABORTED = 34; + /// /// There are no more records left for query. 
/// Value: 50 @@ -713,6 +725,12 @@ public static string GetResultString(int resultCode) case XDR_KEY_BUSY: return "Write can't complete until XDR finishes shipping."; + case MRT_COMMITTED: + return "MRT already committed"; + + case MRT_ABORTED: + return "MRT already aborted"; + case QUERY_END: return "Query end"; @@ -732,7 +750,7 @@ public static string GetResultString(int resultCode) return "Invalid field"; case ILLEGAL_STATE: - return "Illegal state"; + return "Illegal State"; case INVALID_USER: return "Invalid user"; diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index 367a553f..2a95d520 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -25,18 +25,28 @@ namespace Aerospike.Client /// public class Txn { + /// + /// Transaction State. + /// + public enum TxnState + { + OPEN, + VERIFIED, + COMMITTED, + ABORTED + } + private static long randomState = DateTime.UtcNow.Ticks; public long Id { get; private set; } public ConcurrentHashMap Reads { get; private set; } public HashSet Writes { get; private set; } + public TxnState State { get; set; } public string Ns { get; private set; } public int Deadline { get; set; } private bool monitorInDoubt; - private bool rollAttempted; - /// /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with default capacities. 
/// @@ -46,6 +56,7 @@ public Txn() Reads = new ConcurrentHashMap(); Writes = new HashSet(); Deadline = 0; + State = TxnState.OPEN; } /// @@ -68,6 +79,8 @@ public Txn(int readsCapacity, int writesCapacity) Id = CreateId(); Reads = new ConcurrentHashMap(readsCapacity); Writes = new HashSet(writesCapacity); + Deadline = 0; + State = TxnState.OPEN; } [System.Runtime.CompilerServices.MethodImpl(System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)] @@ -216,16 +229,6 @@ public bool MonitorExists() return Deadline != 0; } - public bool SetRollAttempted() - { - if (rollAttempted) - { - return false; - } - rollAttempted = true; - return true; - } - public void Clear() { Ns = null; diff --git a/AerospikeClient/Policy/ClientPolicy.cs b/AerospikeClient/Policy/ClientPolicy.cs index d3880cff..12ef85fe 100644 --- a/AerospikeClient/Policy/ClientPolicy.cs +++ b/AerospikeClient/Policy/ClientPolicy.cs @@ -14,6 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ +using System; using System.Collections.Generic; namespace Aerospike.Client @@ -147,7 +148,7 @@ public class ClientPolicy /// /// The number of cluster tend iterations that defines the window for . /// One tend iteration is defined as plus the time to tend all nodes. - /// At the end of the window, the error count is reset to zero and backoff state is removed + /// At the end of the window, the error count is reset to zero and backoff State is removed /// on all nodes. /// /// Default: 1 @@ -274,6 +275,15 @@ public class ClientPolicy /// public bool useServicesAlternate; + /// For testing purposes only. Do not modify. + /// + /// Should the AerospikeClient instance communicate with the first seed node only + /// instead of using the data partition map to determine which node to send the + /// database command. + /// + /// Default: false + public bool forceSingleNode = false; + /// /// Track server rack data. 
This field is useful when directing read commands to the server node /// that contains the key and exists on the same rack as the client. This serves to lower cloud @@ -343,6 +353,7 @@ public ClientPolicy(ClientPolicy other) this.tlsPolicy = (other.tlsPolicy != null) ? new TlsPolicy(other.tlsPolicy) : null; this.ipMap = other.ipMap; this.useServicesAlternate = other.useServicesAlternate; + this.forceSingleNode = other.forceSingleNode; this.rackAware = other.rackAware; this.rackId = other.rackId; this.rackIds = (other.rackIds != null) ? new List(other.rackIds) : null; diff --git a/AerospikeClient/Policy/QueryPolicy.cs b/AerospikeClient/Policy/QueryPolicy.cs index 501876f9..b5ce8331 100644 --- a/AerospikeClient/Policy/QueryPolicy.cs +++ b/AerospikeClient/Policy/QueryPolicy.cs @@ -80,7 +80,7 @@ public class QueryPolicy : Policy public bool includeBinData = true; /// - /// Terminate query if cluster is in migration state. If the server supports partition + /// Terminate query if cluster is in migration State. If the server supports partition /// queries or the query filter is null (scan), this field is ignored. /// Default: false /// diff --git a/AerospikeClient/Query/PartitionTracker.cs b/AerospikeClient/Query/PartitionTracker.cs index 4cc845fd..7895a66c 100644 --- a/AerospikeClient/Query/PartitionTracker.cs +++ b/AerospikeClient/Query/PartitionTracker.cs @@ -238,7 +238,7 @@ public List AssignPartitionsToNodes(Cluster cluster, string ns) Node node = p.GetNodeQuery(cluster, parts, part); // Use node name to check for single node equality because - // partition map may be in transitional state between + // partition map may be in transitional State between // the old and new node with the same name. 
if (nodeFilter != null && !nodeFilter.Name.Equals(node.Name)) { @@ -249,7 +249,7 @@ public List AssignPartitionsToNodes(Cluster cluster, string ns) if (np == null) { - // If the partition map is in a transitional state, multiple + // If the partition map is in a transitional State, multiple // NodePartitions instances (each with different partitions) // may be created for a single node. np = new NodePartitions(node, partitionsCapacity); @@ -390,7 +390,7 @@ public bool IsComplete(bool hasPartitionQuery, Policy policy, List Date: Wed, 23 Oct 2024 14:55:28 -0600 Subject: [PATCH 09/41] Add ConcurrentHashSet for use in Txn --- AerospikeClient/Async/AsyncTxnRoll.cs | 27 +++-- AerospikeClient/Command/TxnRoll.cs | 26 ++-- AerospikeClient/Main/Txn.cs | 6 +- AerospikeClient/Util/ConcurrentHashSet.cs | 138 ++++++++++++++++++++++ 4 files changed, 174 insertions(+), 23 deletions(-) create mode 100644 AerospikeClient/Util/ConcurrentHashSet.cs diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs index 3f2119fb..05f5b732 100644 --- a/AerospikeClient/Async/AsyncTxnRoll.cs +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -86,7 +86,6 @@ public void Abort(AbortListener listener) private void Verify(BatchRecordArrayListener verifyListener) { // Validate record versions in a batch. 
- int count = 0; BatchRecord[] records = null; Key[] keys = null; long?[] versions = null; @@ -162,20 +161,28 @@ private void RollBack() private void Roll(BatchRecordArrayListener rollListener, int txnAttr) { - HashSet keySet = txn.Writes; - if (keySet.Count == 0) + BatchRecord[] records = null; + Key[] keys = null; + + bool actionPerformed = txn.Writes.PerformActionOnEachElement(max => { - rollListener.OnSuccess(new BatchRecord[0], true); - return; - } + if (max == 0) return false; - Key[] keys = keySet.ToArray(); - BatchRecord[] records = new BatchRecord[keys.Length]; + records = new BatchRecord[max]; + keys = new Key[max]; + return true; + }, + (item, count) => + { + keys[count] = item; + records[count] = new BatchRecord(item, true); + }); - for (int i = 0; i < keys.Length; i++) + if (!actionPerformed) { - records[i] = new BatchRecord(keys[i], true); + rollListener.OnSuccess(new BatchRecord[0], true); + return; } BatchAttr attr = new(); diff --git a/AerospikeClient/Command/TxnRoll.cs b/AerospikeClient/Command/TxnRoll.cs index 4f411281..54bcc6a9 100644 --- a/AerospikeClient/Command/TxnRoll.cs +++ b/AerospikeClient/Command/TxnRoll.cs @@ -196,7 +196,6 @@ public AbortStatusType Abort(BatchPolicy rollPolicy) private void VerifyRecordVersions(BatchPolicy verifyPolicy) { // Validate record versions in a batch. 
- int count = 0; BatchRecord[] records = null; Key[] keys = null; long?[] versions = null; @@ -228,7 +227,7 @@ private void VerifyRecordVersions(BatchPolicy verifyPolicy) List bns = BatchNode.GenerateList(cluster, verifyPolicy, keys, records, false, status); BatchCommand[] commands = new BatchCommand[bns.Count]; - count = 0; + int count = 0; foreach (BatchNode bn in bns) { @@ -253,19 +252,26 @@ private void MarkRollForward(WritePolicy writePolicy, Key txnKey) private void Roll(BatchPolicy rollPolicy, int txnAttr) { - HashSet keySet = txn.Writes; + BatchRecord[] records = null; + Key[] keys = null; - if (keySet.Count == 0) + bool actionPerformed = txn.Writes.PerformActionOnEachElement(max => { - return; - } + if (max == 0) return false; - Key[] keys = keySet.ToArray(); - BatchRecord[] records = new BatchRecord[keys.Length]; + records = new BatchRecord[max]; + keys = new Key[max]; + return true; + }, + (item, count) => + { + keys[count] = item; + records[count] = new BatchRecord(item, true); + }); - for (int i = 0; i < keys.Length; i++) + if (!actionPerformed) { - records[i] = new BatchRecord(keys[i], true); + return; } this.rollRecords = records; diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index 2a95d520..143c241e 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -40,7 +40,7 @@ public enum TxnState public long Id { get; private set; } public ConcurrentHashMap Reads { get; private set; } - public HashSet Writes { get; private set; } + public ConcurrentHashSet Writes { get; private set; } public TxnState State { get; set; } public string Ns { get; private set; } public int Deadline { get; set; } @@ -54,7 +54,7 @@ public Txn() { Id = CreateId(); Reads = new ConcurrentHashMap(); - Writes = new HashSet(); + Writes = new ConcurrentHashSet(); Deadline = 0; State = TxnState.OPEN; } @@ -78,7 +78,7 @@ public Txn(int readsCapacity, int writesCapacity) Id = CreateId(); Reads = new ConcurrentHashMap(readsCapacity); - Writes 
= new HashSet(writesCapacity); + Writes = new ConcurrentHashSet(writesCapacity); Deadline = 0; State = TxnState.OPEN; } diff --git a/AerospikeClient/Util/ConcurrentHashSet.cs b/AerospikeClient/Util/ConcurrentHashSet.cs new file mode 100644 index 00000000..92aae80c --- /dev/null +++ b/AerospikeClient/Util/ConcurrentHashSet.cs @@ -0,0 +1,138 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +namespace Aerospike.Client +{ + public class ConcurrentHashSet + { + private readonly ReaderWriterLockSlim _lock = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion); + private readonly HashSet _hashSet; + + public ConcurrentHashSet() + { + _hashSet = new HashSet(); + } + + public ConcurrentHashSet(int capacity) + { + _hashSet = new HashSet(capacity); + } + + public bool Add(T item) + { + _lock.EnterUpgradeableReadLock(); + try + { + if (!_hashSet.Contains(item)) + { + _lock.EnterWriteLock(); + try + { + _hashSet.Add(item); + } + finally + { + _lock.ExitWriteLock(); + } + return true; + } + } + finally + { + _lock.ExitUpgradeableReadLock(); + } + return false; + } + + public void Clear() + { + try + { + _lock.EnterWriteLock(); + _hashSet.Clear(); + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + + public bool Contains(T item) + { + try + { + _lock.EnterReadLock(); + return _hashSet.Contains(item); + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + + public bool Remove(T item) + { + try + { + _lock.EnterWriteLock(); + return _hashSet.Remove(item); + } + finally + { + if (_lock.IsWriteLockHeld) _lock.ExitWriteLock(); + } + } + + public int Count + { + get + { + try + { + _lock.EnterReadLock(); + return _hashSet.Count; + } + finally + { + if (_lock.IsReadLockHeld) _lock.ExitReadLock(); + } + } + } + + public bool PerformActionOnEachElement(Func initilaize, Action action) + { + _lock.EnterReadLock(); + try + { + if (initilaize is null || initilaize(_hashSet.Count())) + { + int cnt = 0; + foreach (var element in _hashSet) + { + action(element, cnt++); + } + return cnt > 0; + } + } + finally + { + _lock.ExitReadLock(); + } + return false; + } + } +} From f82f6036cfa8fab887821bd13ed6ae2ba9eb12f1 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Mon, 28 Oct 2024 14:52:35 -0600 Subject: [PATCH 10/41] Add timeout and inDoubt logic to MRTs --- AerospikeClient/Async/AsyncTxnRoll.cs | 127 
++++++++++++++++------ AerospikeClient/Command/TxnMonitor.cs | 5 + AerospikeClient/Command/TxnRoll.cs | 47 ++++++-- AerospikeClient/Main/Txn.cs | 33 +++++- AerospikeClient/Policy/TxnRollPolicy.cs | 1 + AerospikeClient/Policy/TxnVerifyPolicy.cs | 1 + 6 files changed, 163 insertions(+), 51 deletions(-) diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs index 05f5b732..b7c09c81 100644 --- a/AerospikeClient/Async/AsyncTxnRoll.cs +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -129,7 +129,7 @@ private void MarkRollForward() } catch (Exception t) { - NotifyCommitFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, t, false); + NotifyMarkRollForwardFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, t); } } @@ -150,12 +150,12 @@ private void RollBack() { try { - RollForwardListener rollListener = new(this); + RollBackListener rollListener = new(this); Roll(rollListener, Command.INFO4_MRT_ROLL_BACK); } catch (Exception t) { - NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, t, false); + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, t); } } @@ -202,7 +202,7 @@ private void CloseOnCommit(bool verified) } else { - NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null, false); + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null); } return; } @@ -218,7 +218,7 @@ private void CloseOnCommit(bool verified) NotifyCommitSuccess(CommitStatusType.CLOSE_ABANDONED); } else { - NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e, false); + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e); } } } @@ -258,49 +258,102 @@ private void NotifyCommitSuccess(CommitStatusType status) } } - private void NotifyCommitFailure(CommitErrorType error, Exception cause, bool setInDoubt) + private void NotifyCommitFailure(CommitErrorType error, Exception cause) { - try + AerospikeException.Commit aec = CreateCommitException(error, cause); + + if (verifyException != null) + { + if (cause == null) 
+ { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, verifyException); + } + else + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, new[] { cause, verifyException }); + } + } + else if (cause != null) + { + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); + } + else { - AerospikeException.Commit aec; + aec = new AerospikeException.Commit(error, verifyRecords, rollRecords); + } - if (verifyException != null) + NotifyCommitFailure(aec); + } + + private void NotifyMarkRollForwardFailure(CommitErrorType error, Exception cause) + { + AerospikeException.Commit aec = CreateCommitException(error, cause); + + if (cause is AerospikeException) + { + AerospikeException ae = (AerospikeException)cause; + + if (ae.Result == ResultCode.MRT_ABORTED) { - if (cause == null) - { - aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, verifyException); - } - else - { - aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, new[] { cause, verifyException }); - } + aec.SetInDoubt(false); + txn.InDoubt = false; + txn.State = Txn.TxnState.ABORTED; } - else if (cause != null) + else if (txn.InDoubt) { - aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); + // The transaction was already inDoubt and just failed again, + // so the new exception should also be inDoubt. + aec.SetInDoubt(true); } - else + else if (aec.InDoubt) + { + // The current exception is inDoubt. 
+ aec.SetInDoubt(true); + txn.InDoubt = true; + } + } + else + { + if (txn.InDoubt) { - aec = new AerospikeException.Commit(error, verifyRecords, rollRecords); + aec.SetInDoubt(true); } + } - if (cause is AerospikeException) { + NotifyCommitFailure(aec); + } + + private AerospikeException.Commit CreateCommitException(CommitErrorType error, Exception cause) + { + if (cause != null) + { + AerospikeException.Commit aec = new(error, verifyRecords, rollRecords, cause); + + if (cause is AerospikeException) + { AerospikeException src = (AerospikeException)cause; aec.Node = src.Node; aec.Policy = src.Policy; aec.Iteration = src.Iteration; - - if (setInDoubt) - { - aec.SetInDoubt(src.InDoubt); - } + aec.SetInDoubt(src.InDoubt); } + return aec; + } + else + { + return new AerospikeException.Commit(error, verifyRecords, rollRecords); + } + } + private void NotifyCommitFailure(AerospikeException.Commit aec) + { + try + { commitListener.OnFailure(aec); } - catch (Exception t) + catch (Exception e) { - Log.Error("CommitListener onFailure() failed: " + t.StackTrace); + Log.Error("CommitListener OnFailure() failed: " + e.StackTrace); } } @@ -312,9 +365,9 @@ private void NotifyAbortSuccess(AbortStatusType status) { abortListener.OnSuccess(status); } - catch (Exception t) + catch (Exception e) { - Log.Error("AbortListener onSuccess() failed: " + t.StackTrace); + Log.Error("AbortListener onSuccess() failed: " + e.StackTrace); } } @@ -347,6 +400,7 @@ public void OnFailure(BatchRecord[] records, AerospikeException ae) { command.verifyRecords = records; command.verifyException = ae; + command.txn.State = Txn.TxnState.ABORTED; command.RollBack(); } }; @@ -393,12 +447,13 @@ public MarkRollForwardListener(AsyncTxnRoll command) public void OnSuccess(Key key) { command.txn.State = Txn.TxnState.VERIFIED; + command.txn.InDoubt = false; command.RollForward(); } public void OnFailure(AerospikeException ae) { - command.NotifyCommitFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, ae, true); + 
command.NotifyMarkRollForwardFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, ae); } }; @@ -451,14 +506,14 @@ public void OnSuccess(BatchRecord[] records, bool status) } else { - command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, null, false); + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, null); } } public void OnFailure(BatchRecord[] records, AerospikeException ae) { command.rollRecords = records; - command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, ae, false); + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, ae); } }; @@ -482,7 +537,7 @@ public void OnSuccess(Key key, bool existed) } else { - command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null, false); + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL, null); } } @@ -494,7 +549,7 @@ public void OnFailure(AerospikeException ae) } else { - command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, ae, false); + command.NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, ae); } } }; diff --git a/AerospikeClient/Command/TxnMonitor.cs b/AerospikeClient/Command/TxnMonitor.cs index 7d5aabde..d75af441 100644 --- a/AerospikeClient/Command/TxnMonitor.cs +++ b/AerospikeClient/Command/TxnMonitor.cs @@ -154,6 +154,11 @@ public static WritePolicy CopyTimeoutPolicy(Policy policy) compress = policy.compress, respondAllOps = true }; + + // Note that the server only accepts the timeout on MRT monitor record create. + // The server ignores the MRT timeout field on successive MRT monitor record + // updates. 
+ wp.expiration = policy.Txn.Timeout; return wp; } } diff --git a/AerospikeClient/Command/TxnRoll.cs b/AerospikeClient/Command/TxnRoll.cs index 54bcc6a9..219233e3 100644 --- a/AerospikeClient/Command/TxnRoll.cs +++ b/AerospikeClient/Command/TxnRoll.cs @@ -55,7 +55,7 @@ public void Verify(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) catch (Exception e2) { // Throw combination of verify and roll exceptions. - throw OnCommitError(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, e, false, e2); + throw CreateCommitException(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, e, e2); } if (txn.MonitorMightExist()) @@ -69,12 +69,12 @@ public void Verify(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) catch (Exception e3) { // Throw combination of verify and close exceptions. - throw OnCommitError(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e, false, e3); + throw CreateCommitException(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e, e3); } } // Throw original exception when abort succeeds. - throw OnCommitError(CommitErrorType.VERIFY_FAIL, e, false); + throw CreateCommitException(CommitErrorType.VERIFY_FAIL, e); } txn.State = Txn.TxnState.VERIFIED; @@ -92,12 +92,44 @@ public CommitStatusType Commit(BatchPolicy rollPolicy) { MarkRollForward(writePolicy, txnKey); } + catch (AerospikeException ae) + { + AerospikeException.Commit aec = CreateCommitException(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, ae); + + if (ae.Result == ResultCode.MRT_ABORTED) + { + aec.SetInDoubt(false); + txn.InDoubt = false; + txn.State = Txn.TxnState.ABORTED; + } + else if (txn.InDoubt) + { + // The transaction was already inDoubt and just failed again, + // so the new exception should also be inDoubt. + aec.SetInDoubt(true); + } + else if (ae.InDoubt) + { + // The current exception is inDoubt. 
+ aec.SetInDoubt(true); + txn.InDoubt = true; + } + throw aec; + } catch (Exception e) { - throw OnCommitError(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, e, true); + AerospikeException.Commit aec = CreateCommitException(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, e); + + if (txn.InDoubt) + { + aec.SetInDoubt(true); + } + + throw aec; } txn.State = Txn.TxnState.COMMITTED; + txn.InDoubt = false; // Roll-forward writes in batch. try @@ -126,7 +158,7 @@ public CommitStatusType Commit(BatchPolicy rollPolicy) return CommitStatusType.OK; } - private AerospikeException.Commit OnCommitError(CommitErrorType error, Exception cause, bool setInDoubt, Exception innerException = null) + private AerospikeException.Commit CreateCommitException(CommitErrorType error, Exception cause, Exception innerException = null) { AerospikeException.Commit aec; @@ -156,10 +188,7 @@ private AerospikeException.Commit OnCommitError(CommitErrorType error, Exception aec.Node = src.Node; aec.Policy = src.Policy; aec.Iteration = src.Iteration; - if (setInDoubt) - { - aec.SetInDoubt(src.InDoubt); - } + aec.SetInDoubt(src.InDoubt); } return aec; } diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index 143c241e..9dfd3079 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -17,6 +17,9 @@ using System; using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Security.Cryptography; +using System.Threading; namespace Aerospike.Client { @@ -26,7 +29,7 @@ namespace Aerospike.Client public class Txn { /// - /// Transaction State. + /// MRT State. 
/// public enum TxnState { @@ -41,14 +44,30 @@ public enum TxnState public long Id { get; private set; } public ConcurrentHashMap Reads { get; private set; } public ConcurrentHashSet Writes { get; private set; } - public TxnState State { get; set; } + public TxnState State { get; internal set; } public string Ns { get; private set; } - public int Deadline { get; set; } + + /// + /// MRT deadline. The deadline is a wall clock time calculated by the server from the + /// MRT timeout that is sent by the client when creating the MRT monitor record. This deadline + /// is used to avoid client/server clock skew issues. For internal use only. + /// + internal int Deadline { get; set; } + + /// + /// MRT timeout in seconds. The timer starts when the MRT monitor record is created. + /// This occurs when the first command in the MRT is executed. If the timeout is reached before + /// a commit or abort is called, the server will expire and rollback the MRT. + /// + public int Timeout { get; set; } private bool monitorInDoubt; + public bool InDoubt { get; internal set; } + /// - /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with default capacities. + /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with + /// default capacities. The default MRT timeout is 10 seconds. /// public Txn() { @@ -57,10 +76,12 @@ public Txn() Writes = new ConcurrentHashSet(); Deadline = 0; State = TxnState.OPEN; + Timeout = 10; // seconds } /// - /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with given capacities. + /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with + /// given capacities. The default MRT timeout is 10 seconds. /// /// expected number of record reads in the MRT. Minimum value is 16. /// expected number of record writes in the MRT. Minimum value is 16. 
@@ -81,6 +102,7 @@ public Txn(int readsCapacity, int writesCapacity) Writes = new ConcurrentHashSet(writesCapacity); Deadline = 0; State = TxnState.OPEN; + Timeout = 10; // seconds } [System.Runtime.CompilerServices.MethodImpl(System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)] @@ -88,7 +110,6 @@ public Txn(int readsCapacity, int writesCapacity) private static long CreateId() { - // xorshift64* doesn't generate zeroes. long oldState, newState, interlockedResult; do { diff --git a/AerospikeClient/Policy/TxnRollPolicy.cs b/AerospikeClient/Policy/TxnRollPolicy.cs index e2a90034..026703ab 100644 --- a/AerospikeClient/Policy/TxnRollPolicy.cs +++ b/AerospikeClient/Policy/TxnRollPolicy.cs @@ -38,6 +38,7 @@ public TxnRollPolicy() { replica = Replica.MASTER; maxRetries = 5; + socketTimeout = 3000; totalTimeout = 10000; sleepBetweenRetries = 1000; } diff --git a/AerospikeClient/Policy/TxnVerifyPolicy.cs b/AerospikeClient/Policy/TxnVerifyPolicy.cs index 4130fc98..30bd16ed 100644 --- a/AerospikeClient/Policy/TxnVerifyPolicy.cs +++ b/AerospikeClient/Policy/TxnVerifyPolicy.cs @@ -39,6 +39,7 @@ public TxnVerifyPolicy() readModeSC = ReadModeSC.LINEARIZE; replica = Replica.MASTER; maxRetries = 5; + socketTimeout = 3000; totalTimeout = 10000; sleepBetweenRetries = 1000; } From ef53fd13e6a4a5e8bfa17c28efd43d6e65663f71 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Tue, 29 Oct 2024 15:18:07 -0600 Subject: [PATCH 11/41] Fixing typo bugs --- AerospikeClient/Async/AsyncBatch.cs | 2 +- AerospikeClient/Async/AsyncClient.cs | 2 +- .../Async/AsyncTxnMarkRollForward.cs | 2 +- AerospikeClient/Async/AsyncTxnRoll.cs | 28 ++++++++++--------- AerospikeClient/Command/TxnRoll.cs | 4 +-- AerospikeClient/Main/ResultCode.cs | 9 +++--- AerospikeClient/Main/Txn.cs | 12 ++++---- AerospikeTest/Sync/Basic/TestTxn.cs | 2 +- 8 files changed, 31 insertions(+), 30 deletions(-) diff --git a/AerospikeClient/Async/AsyncBatch.cs b/AerospikeClient/Async/AsyncBatch.cs index 4fae2e41..4d6056e3 
100644 --- a/AerospikeClient/Async/AsyncBatch.cs +++ b/AerospikeClient/Async/AsyncBatch.cs @@ -1784,7 +1784,7 @@ BatchAttr attr this.records = records; // Create commands. - List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, false, this); + List batchNodes = BatchNode.GenerateList(cluster, policy, keys, records, true, this); AsyncBatchCommand[] tasks = new AsyncBatchCommand[batchNodes.Count]; int count = 0; diff --git a/AerospikeClient/Async/AsyncClient.cs b/AerospikeClient/Async/AsyncClient.cs index f3be119f..ef784d1a 100644 --- a/AerospikeClient/Async/AsyncClient.cs +++ b/AerospikeClient/Async/AsyncClient.cs @@ -162,7 +162,7 @@ public void Commit(CommitListener listener, Txn txn) break; case Txn.TxnState.VERIFIED: - atr.Verify(listener); + atr.Commit(listener); break; case Txn.TxnState.COMMITTED: diff --git a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs index c050de4e..33807800 100644 --- a/AerospikeClient/Async/AsyncTxnMarkRollForward.cs +++ b/AerospikeClient/Async/AsyncTxnMarkRollForward.cs @@ -53,7 +53,7 @@ protected internal override bool ParseResult() ParseHeader(); ParseFields(policy.Txn, Key, true); - // MRT_COMMITTED is considered a success because it means a previous attempt already + // MRT_COMMITTED is considered a success because it means a previous attempt already // succeeded in notifying the server that the MRT will be rolled forward. 
if (resultCode == ResultCode.OK || resultCode == ResultCode.MRT_COMMITTED) { diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs index b7c09c81..053d7680 100644 --- a/AerospikeClient/Async/AsyncTxnRoll.cs +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -127,9 +127,9 @@ private void MarkRollForward() AsyncTxnMarkRollForward command = new(cluster, writeListener, writePolicy, txnKey); command.Execute(); } - catch (Exception t) + catch (Exception e) { - NotifyMarkRollForwardFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, t); + NotifyMarkRollForwardFailure(CommitErrorType.MARK_ROLL_FORWARD_ABANDONED, e); } } @@ -153,9 +153,9 @@ private void RollBack() RollBackListener rollListener = new(this); Roll(rollListener, Command.INFO4_MRT_ROLL_BACK); } - catch (Exception t) + catch (Exception e) { - NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, t); + NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, e); } } @@ -214,10 +214,12 @@ private void CloseOnCommit(bool verified) } catch (Exception e) { - if (verified) { + if (verified) + { NotifyCommitSuccess(CommitStatusType.CLOSE_ABANDONED); } - else { + else + { NotifyCommitFailure(CommitErrorType.VERIFY_FAIL_CLOSE_ABANDONED, e); } } @@ -254,7 +256,7 @@ private void NotifyCommitSuccess(CommitStatusType status) } catch (Exception t) { - Log.Error("CommitListener onSuccess() failed: " + t.StackTrace); + Log.Error("CommitListener OnSuccess() failed: " + t.StackTrace); } } @@ -301,13 +303,13 @@ private void NotifyMarkRollForwardFailure(CommitErrorType error, Exception cause } else if (txn.InDoubt) { - // The transaction was already inDoubt and just failed again, - // so the new exception should also be inDoubt. + // The transaction was already InDoubt and just failed again, + // so the new exception should also be InDoubt. aec.SetInDoubt(true); } - else if (aec.InDoubt) + else if (ae.InDoubt) { - // The current exception is inDoubt. 
+ // The current exception is InDoubt. aec.SetInDoubt(true); txn.InDoubt = true; } @@ -367,7 +369,7 @@ private void NotifyAbortSuccess(AbortStatusType status) } catch (Exception e) { - Log.Error("AbortListener onSuccess() failed: " + e.StackTrace); + Log.Error("AbortListener OnSuccess() failed: " + e.StackTrace); } } @@ -446,7 +448,7 @@ public MarkRollForwardListener(AsyncTxnRoll command) public void OnSuccess(Key key) { - command.txn.State = Txn.TxnState.VERIFIED; + command.txn.State = Txn.TxnState.COMMITTED; command.txn.InDoubt = false; command.RollForward(); } diff --git a/AerospikeClient/Command/TxnRoll.cs b/AerospikeClient/Command/TxnRoll.cs index 219233e3..d5b3e48f 100644 --- a/AerospikeClient/Command/TxnRoll.cs +++ b/AerospikeClient/Command/TxnRoll.cs @@ -104,8 +104,8 @@ public CommitStatusType Commit(BatchPolicy rollPolicy) } else if (txn.InDoubt) { - // The transaction was already inDoubt and just failed again, - // so the new exception should also be inDoubt. + // The transaction was already InDoubt and just failed again, + // so the new exception should also be InDoubt. aec.SetInDoubt(true); } else if (ae.InDoubt) diff --git a/AerospikeClient/Main/ResultCode.cs b/AerospikeClient/Main/ResultCode.cs index 6cdeb714..bf060407 100644 --- a/AerospikeClient/Main/ResultCode.cs +++ b/AerospikeClient/Main/ResultCode.cs @@ -289,10 +289,11 @@ public sealed class ResultCode public const int MRT_BLOCKED = 29; /// - /// MRT read verify failed. Some other command changed record outside of the transaction. + /// MRT read version mismatch identified during commit. + /// Some other command changed the record outside of the transaction. /// Value: 30 /// - public const int MRT_CONFLICT = 30; + public const int MRT_VERSION_MISMATCH = 30; /// /// MRT deadline reached without a successful commit or abort. 
@@ -716,8 +717,8 @@ public static string GetResultString(int resultCode) case MRT_BLOCKED: return "MRT record blocked by a different transaction"; - case MRT_CONFLICT: - return "MRT read verify failed"; + case MRT_VERSION_MISMATCH: + return "MRT version mismatch"; case MRT_EXPIRED: return "MRT expired"; diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index 9dfd3079..f96eae5a 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -15,12 +15,6 @@ * the License. */ -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Security.Cryptography; -using System.Threading; - namespace Aerospike.Client { /// @@ -45,6 +39,10 @@ public enum TxnState public ConcurrentHashMap Reads { get; private set; } public ConcurrentHashSet Writes { get; private set; } public TxnState State { get; internal set; } + + /// + /// MRT namespace. + /// public string Ns { get; private set; } /// @@ -229,7 +227,7 @@ public void SetNamespace(List records) /// /// Set that the MRT monitor existence is in doubt. 
/// - public void SetMonitorInDoubt() + internal void SetMonitorInDoubt() { this.monitorInDoubt = true; } diff --git a/AerospikeTest/Sync/Basic/TestTxn.cs b/AerospikeTest/Sync/Basic/TestTxn.cs index 9228cee9..c4188dd1 100644 --- a/AerospikeTest/Sync/Basic/TestTxn.cs +++ b/AerospikeTest/Sync/Basic/TestTxn.cs @@ -639,7 +639,7 @@ record = client.Get(p, key1); } catch (AerospikeException ae) { - if (ae.Result != ResultCode.MRT_CONFLICT) + if (ae.Result != ResultCode.MRT_VERSION_MISMATCH) { throw; } From 72cce8fc69b5d7f9a500c129c31f2209cbab408e Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Tue, 5 Nov 2024 10:39:37 -0700 Subject: [PATCH 12/41] Send MRT timeout to server correctly --- AerospikeClient/Command/Command.cs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs index d9705949..1604955a 100644 --- a/AerospikeClient/Command/Command.cs +++ b/AerospikeClient/Command/Command.cs @@ -137,7 +137,20 @@ public void SetTxnAddKeys(WritePolicy policy, Key key, OperateArgs args) bool compress = SizeBuffer(policy); - WriteTxnMonitor(key, args.readAttr, args.writeAttr, fieldCount, args.operations.Length); + dataOffset += 8; + dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[dataOffset++] = (byte)args.readAttr; + dataBuffer[dataOffset++] = (byte)args.writeAttr; + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = 0; + dataBuffer[dataOffset++] = 0; + dataOffset += ByteUtil.IntToBytes(0, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)policy.expiration, dataBuffer, dataOffset); + dataOffset += ByteUtil.IntToBytes((uint)serverTimeout, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)fieldCount, dataBuffer, dataOffset); + dataOffset += ByteUtil.ShortToBytes((ushort)args.operations.Length, dataBuffer, dataOffset); + + WriteKey(key); foreach (Operation operation in args.operations) { From 
a22b63f7725fc9d25f07c564227afbf9e076e0e3 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 8 Nov 2024 10:32:43 -0700 Subject: [PATCH 13/41] Work on pipeline --- .github/actions/run-ee-server/action.yml | 101 ++++++++++++++++++ .github/workflows/build-artifacts.yml | 75 +++++++++++++ .../docker-build-context/Dockerfile.txt | 49 +++++++++ .github/workflows/dotnet.yml | 85 +++++++++++++++ 4 files changed, 310 insertions(+) create mode 100644 .github/actions/run-ee-server/action.yml create mode 100644 .github/workflows/build-artifacts.yml create mode 100644 .github/workflows/docker-build-context/Dockerfile.txt create mode 100644 .github/workflows/dotnet.yml diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml new file mode 100644 index 00000000..b0af7d1b --- /dev/null +++ b/.github/actions/run-ee-server/action.yml @@ -0,0 +1,101 @@ +name: 'Run EE Server' +description: 'Run EE server. Returns once server is ready. Only tested on Linux and macOS' +# NOTE: do not share this server container with others +# since it's using the default admin / admin credentials +inputs: + # All inputs in composite actions are strings + use-server-rc: + required: true + description: Deploy server release candidate? + default: 'false' + server-tag: + required: true + description: Specify Docker tag + default: 'latest' + # Github Composite Actions can't access secrets + # so we need to pass them in as inputs + docker-hub-username: + description: Required for using release candidates + required: false + docker-hub-password: + description: Required for using release candidates + required: false + +runs: + using: "composite" + steps: + - name: Install crudini to manipulate config.conf + # This will only work on the Github hosted runners. 
+ run: pipx install crudini --pip-args "-c ${{ github.workspace }}/.github/workflows/requirements.txt" + working-directory: .github/workflows + shell: bash + + - name: Create config.conf + run: cp config.conf.template config.conf + working-directory: test + shell: bash + + - run: echo SUPERUSER_NAME_AND_PASSWORD="superuser" >> $GITHUB_ENV + shell: bash + + - name: Set credentials in config file + run: | + crudini --existing=param --set config.conf enterprise-edition user ${{ env.SUPERUSER_NAME_AND_PASSWORD }} + crudini --existing=param --set config.conf enterprise-edition password ${{ env.SUPERUSER_NAME_AND_PASSWORD }} + working-directory: test + shell: bash + + - name: Log into Docker Hub to get server RC + if: ${{ inputs.use-server-rc == 'true' }} + run: docker login --username ${{ inputs.docker-hub-username }} --password ${{ inputs.docker-hub-password }} + shell: bash + + - run: echo IMAGE_NAME=aerospike/aerospike-server-enterprise${{ inputs.use-server-rc == 'true' && '-rc' || '' }}:${{ inputs.server-tag }} >> $GITHUB_ENV + shell: bash + + - run: echo NEW_IMAGE_NAME=${{ env.IMAGE_NAME }}-security-and-sc >> $GITHUB_ENV + shell: bash + + # macOS Github runners and Windows self-hosted runners don't have buildx installed by default + - if: ${{ runner.os == 'Windows' || runner.os == 'macOS' }} + uses: docker/setup-buildx-action@v3 + + - name: Build and push + uses: docker/build-push-action@v6 + with: + # Don't want to use default Git context or else it will clone the whole Python client repo again + context: .github/workflows/docker-build-context + build-args: | + server_image=${{ env.IMAGE_NAME }} + tags: ${{ env.NEW_IMAGE_NAME }} + # setup-buildx-action configures Docker to use the docker-container build driver + # This driver doesn't publish an image locally by default + # so we have to manually enable it + load: true + + - run: echo SERVER_CONTAINER_NAME="aerospike" >> $GITHUB_ENV + shell: bash + + - run: docker run -d --name ${{ env.SERVER_CONTAINER_NAME }} -e 
DEFAULT_TTL=2592000 -p 3000:3000 ${{ env.NEW_IMAGE_NAME }} + shell: bash + + - uses: ./.github/actions/wait-for-as-server-to-start + with: + container-name: ${{ env.SERVER_CONTAINER_NAME }} + is-security-enabled: true + is-strong-consistency-enabled: true + + - run: echo ASADM_AUTH_FLAGS="--user=${{ env.SUPERUSER_NAME_AND_PASSWORD }} --password=${{ env.SUPERUSER_NAME_AND_PASSWORD }}" >> $GITHUB_ENV + shell: bash + + # All the partitions are assumed to be dead when reusing a roster file + - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm $ASADM_AUTH_FLAGS --enable --execute "manage revive ns test" + shell: bash + + # Apply changes + - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm $ASADM_AUTH_FLAGS --enable --execute "manage recluster" + shell: bash + + # For debugging + - run: docker logs aerospike + shell: bash diff --git a/.github/workflows/build-artifacts.yml b/.github/workflows/build-artifacts.yml new file mode 100644 index 00000000..b0b2fbb1 --- /dev/null +++ b/.github/workflows/build-artifacts.yml @@ -0,0 +1,75 @@ +name: Build artifacts +run-name: Build artifacts (run_tests=${{ inputs.run_tests }}, use-server-rc=${{ inputs.use-server-rc }}, server-tag=${{ inputs.server-tag }}) + +# Builds manylinux wheels and source distribution +# Optionally run tests on manylinux wheels +# Then upload artifacts to Github + +on: + workflow_dispatch: + inputs: + run_tests: + description: "Run integration tests?" + required: true + type: boolean + default: false + use-server-rc: + type: boolean + required: true + default: false + description: 'Test against server release candidate? 
(e.g to test new server features)' + server-tag: + type: string + required: true + default: 'latest' + description: 'Server docker image tag (e.g to test a client backport version)' + + workflow_call: + inputs: + # The "dev" tests test the artifacts against a server + run_tests: + required: false + type: boolean + default: false + # workflow_call hack + is_workflow_call: + type: boolean + default: true + required: false + # This input is only used in workflow_call events + sha-to-build-and-test: + description: A calling workflow may want to run this workflow on a different ref than the calling workflow's ref + type: string + # Make it required to make things simple + required: true + # A calling workflow doesn't actually set values to the inputs below + # But that workflow needs to have default values for these inputs + use-server-rc: + required: false + default: false + type: boolean + server-tag: + type: string + required: false + default: 'latest' + secrets: + DOCKER_HUB_BOT_USERNAME: + required: true + DOCKER_HUB_BOT_PW: + required: true + MAC_M1_SELF_HOSTED_RUNNER_PW: + required: true + +jobs: + dotnet: + strategy: + matrix: + fail-fast: false + uses: ./.github/workflows/dotnet.yml + with: + # Can't use env context here, so just copy from build-sdist env var + sha-to-build-and-test: ${{ inputs.is_workflow_call == true && inputs.sha-to-build-and-test || github.sha }} + run_tests: ${{ inputs.run_tests }} + use-server-rc: ${{ inputs.use-server-rc }} + server-tag: ${{ inputs.server-tag }} + secrets: inherit diff --git a/.github/workflows/docker-build-context/Dockerfile.txt b/.github/workflows/docker-build-context/Dockerfile.txt new file mode 100644 index 00000000..9ac06a15 --- /dev/null +++ b/.github/workflows/docker-build-context/Dockerfile.txt @@ -0,0 +1,49 @@ +ARG server_image=aerospike/aerospike-server-enterprise +ARG ROSTER_FILE_NAME=roster.smd +# Temp file for passing node id from one build stage to another +# Docker doesn't support command substitution for 
setting values for ARG variables, so we have to do this +ARG NODE_ID_FILE_NAME=node_id + +FROM $server_image as configure-server + +WORKDIR /opt/aerospike/smd + +# Enable authentication + +ARG AEROSPIKE_CONF_TEMPLATE_PATH=/etc/aerospike/aerospike.template.conf + +# Not using asconfig to edit config because we are working with a template file, which may not have valid values yet +RUN echo -e "security {\n\tenable-quotas true\n}\n" >> $AEROSPIKE_CONF_TEMPLATE_PATH +# security.smd was generated manually by +# 1. Starting a new Aerospike EE server using Docker +# 2. Creating the superuser user +# 3. Copying /opt/aerospike/smd/security.smd from the container and committing it to this repo +# This file should always work +# TODO: generate this automatically, somehow. +COPY security.smd . + +# Enable strong consistency +RUN sed -i "s/\(namespace.*{\)/\1\n\tstrong-consistency true/" $AEROSPIKE_CONF_TEMPLATE_PATH +RUN sed -i "s/\(namespace.*{\)/\1\n\tstrong-consistency-allow-expunge true/" $AEROSPIKE_CONF_TEMPLATE_PATH +ARG ROSTER_FILE_NAME +COPY $ROSTER_FILE_NAME . + +# Fetch node id from roster.smd + +# There's no tag for the latest major version to prevent breaking changes in jq +# This is the next best thing +FROM ghcr.io/jqlang/jq:1.7 as get-jq +# jq docker image doesn't have a shell +# We need a shell to fetch and pass the node id to the next build stage +FROM busybox as get-node-id +COPY --from=get-jq /jq /bin/ +ARG ROSTER_FILE_NAME +COPY $ROSTER_FILE_NAME . +ARG NODE_ID_FILE_NAME +RUN jq --raw-output '.[1].value' $ROSTER_FILE_NAME > $NODE_ID_FILE_NAME + +FROM configure-server as set-node-id +ARG NODE_ID_FILE_NAME +COPY --from=get-node-id $NODE_ID_FILE_NAME . 
+RUN sed -i "s/\(^service {\)/\1\n\tnode-id $(cat $NODE_ID_FILE_NAME)/" $AEROSPIKE_CONF_TEMPLATE_PATH +RUN rm $NODE_ID_FILE_NAME diff --git a/.github/workflows/dotnet.yml b/.github/workflows/dotnet.yml new file mode 100644 index 00000000..b5d6de05 --- /dev/null +++ b/.github/workflows/dotnet.yml @@ -0,0 +1,85 @@ +# This workflow will build a .NET project +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-net + +name: .NET + +on: + workflow_dispatch: + inputs: + run_tests: + description: 'Run Aerospike server and run tests?' + type: boolean + required: false + default: false + use-server-rc: + type: boolean + required: true + default: false + description: 'Test against server release candidate?' + server-tag: + required: true + default: 'latest' + description: 'Server docker image tag' + + workflow_call: + inputs: + # See workflow call hack in update-version.yml + is_workflow_call: + type: boolean + default: true + required: false + # Only used in workflow_call event + sha-to-build-and-test: + type: string + required: true + unoptimized: + type: boolean + required: false + default: false + run_tests: + type: boolean + required: false + default: false + use-server-rc: + required: false + type: boolean + default: false + description: 'Test against server release candidate?' + server-tag: + required: false + type: string + default: 'latest' + description: 'Server docker image tag' + secrets: + # Just make all the secrets required to make things simpler... 
+ DOCKER_HUB_BOT_USERNAME: + required: true + DOCKER_HUB_BOT_PW: + required: true + MAC_M1_SELF_HOSTED_RUNNER_PW: + required: true + +env: + COMMIT_SHA_TO_BUILD_AND_TEST: ${{ inputs.is_workflow_call == true && inputs.sha-to-build-and-test || github.sha }} + # Note that environment variables in Github are all strings + # Github mac m1 and windows runners don't support Docker / nested virtualization + # so we need to use self-hosted runners to test wheels for these platforms + RUN_INTEGRATION_TESTS_IN_CIBW: ${{ inputs.run_tests && (startsWith(inputs.platform-tag, 'manylinux') || inputs.platform-tag == 'macosx_x86_64') }} + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - name: Setup .NET + uses: actions/setup-dotnet@v4 + with: + dotnet-version: 8.0.x + - name: Restore dependencies + run: dotnet restore + - name: Build + run: dotnet build --no-restore + - name: Test + run: dotnet test --no-build --verbosity normal \ No newline at end of file From 934c07bd9503340687213e206270e203faf928ef Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 8 Nov 2024 13:09:19 -0700 Subject: [PATCH 14/41] Continue working on pipeline --- .github/actions/run-ee-server/action.yml | 21 ------ .github/workflows/build-artifacts.yml | 4 -- .github/workflows/dotnet.yml | 2 +- .github/workflows/tests.yml | 69 +++++++++++++++++++ .../wait-for-as-server-to-start.bash | 47 +++++++++++++ 5 files changed, 117 insertions(+), 26 deletions(-) create mode 100644 .github/workflows/tests.yml create mode 100644 .github/workflows/wait-for-as-server-to-start.bash diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml index b0af7d1b..050d9ff9 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -24,27 +24,6 @@ inputs: runs: using: "composite" steps: - - name: Install crudini to manipulate config.conf - # This will only work on the Github hosted runners. 
- run: pipx install crudini --pip-args "-c ${{ github.workspace }}/.github/workflows/requirements.txt" - working-directory: .github/workflows - shell: bash - - - name: Create config.conf - run: cp config.conf.template config.conf - working-directory: test - shell: bash - - - run: echo SUPERUSER_NAME_AND_PASSWORD="superuser" >> $GITHUB_ENV - shell: bash - - - name: Set credentials in config file - run: | - crudini --existing=param --set config.conf enterprise-edition user ${{ env.SUPERUSER_NAME_AND_PASSWORD }} - crudini --existing=param --set config.conf enterprise-edition password ${{ env.SUPERUSER_NAME_AND_PASSWORD }} - working-directory: test - shell: bash - - name: Log into Docker Hub to get server RC if: ${{ inputs.use-server-rc == 'true' }} run: docker login --username ${{ inputs.docker-hub-username }} --password ${{ inputs.docker-hub-password }} diff --git a/.github/workflows/build-artifacts.yml b/.github/workflows/build-artifacts.yml index b0b2fbb1..d53438dc 100644 --- a/.github/workflows/build-artifacts.yml +++ b/.github/workflows/build-artifacts.yml @@ -1,10 +1,6 @@ name: Build artifacts run-name: Build artifacts (run_tests=${{ inputs.run_tests }}, use-server-rc=${{ inputs.use-server-rc }}, server-tag=${{ inputs.server-tag }}) -# Builds manylinux wheels and source distribution -# Optionally run tests on manylinux wheels -# Then upload artifacts to Github - on: workflow_dispatch: inputs: diff --git a/.github/workflows/dotnet.yml b/.github/workflows/dotnet.yml index b5d6de05..ae5eb48c 100644 --- a/.github/workflows/dotnet.yml +++ b/.github/workflows/dotnet.yml @@ -76,7 +76,7 @@ jobs: - name: Setup .NET uses: actions/setup-dotnet@v4 with: - dotnet-version: 8.0.x + dotnet-version: 6.0.x - name: Restore dependencies run: dotnet restore - name: Build diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..99835089 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,69 @@ +name: Run tests + +env: + +# 
Trigger test workflow whenever: +# 1. Commits are pushed directly to the mrt branch +on: + push: + branches: ["mrt"] + pull_request: + branches: ["mrt"] + types: [ + # Default triggers + opened, + synchronize, + reopened, + # Additional triggers + labeled, + unlabeled + ] + workflow_dispatch: + inputs: + test-server-rc: + type: boolean + default: false + required: true + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + fail-fast: false + + steps: + - uses: actions/checkout@v4 + - name: Setup .NET + uses: actions/setup-dotnet@v4 + with: + dotnet-version: 6.0.x + - name: Restore dependencies + run: dotnet restore + - name: Install dependencies + run: dotnet add package NeoLua --version 1.3.14 + - name: Build + run: dotnet build --no-restore + + test-ee: + runs-on: ubuntu-latest + needs: build + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - uses: ./.github/actions/run-ee-server + with: + use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }} + docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }} + docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }} + + - name: Run tests + run: dotnet test --no-build --verbosity normal + + - name: Show logs if failed + if: ${{ failure() }} + run: | + docker container logs aerospike + cat ./configs/aerospike.conf diff --git a/.github/workflows/wait-for-as-server-to-start.bash b/.github/workflows/wait-for-as-server-to-start.bash new file mode 100644 index 00000000..c43e17da --- /dev/null +++ b/.github/workflows/wait-for-as-server-to-start.bash @@ -0,0 +1,47 @@ +#!/bin/bash + +set -x +# Makes sure that if the "docker exec" command fails, it is not ignored +set -o pipefail + +container_name=$1 +is_security_enabled=$2 + +if [[ $is_security_enabled == true ]]; then + # We need to pass credentials to asinfo if server requires it + # TODO: passing in credentials via command line flags since I can't figure out how to use --instance with global 
astools.conf + user_credentials="--user=admin --password=admin" +fi + +while true; do + # An unset variable will have a default empty value + # Intermediate step is to print docker exec command's output in case it fails + # Sometimes, errors only appear in stdout and not stderr, like if asinfo throws an error because of no credentials + # (This is a bug in asinfo since all error messages should be sent to stderr) + # But piping and passing stdin to grep will hide the first command's stdout. + # grep doesn't have a way to print all lines passed as input. + # ack does have an option but it doesn't come installed by default + # shellcheck disable=SC2086 # The flags in user credentials should be separate anyways. Not one string + echo "Checking if we can reach the server via the service port..." + if docker exec "$container_name" asinfo $user_credentials -v status | tee >(cat) | grep -qE "^ok"; then + # Server is ready when asinfo returns ok + echo "Can reach server now." + # docker container inspect "$container_name" + break + fi + + echo "Server didn't return ok via the service port. Polling again..." +done + +# Although the server may be reachable via the service port, the cluster may not be fully initialized yet. +# If we try to connect too soon (e.g right after "status" returns ok), the client may throw error code -1 +while true; do + echo "Waiting for server to stabilize (i.e return a cluster key)..." + # We assume that when an ERROR is returned, the cluster is not stable yet (i.e not fully initialized) + if docker exec "$container_name" asinfo $user_credentials -v cluster-stable 2>&1 | (! grep -qE "^ERROR"); then + echo "Server is in a stable state." + break + fi + + echo "Server did not return a cluster key. Polling again..." 
+done From bfb03c5e3153d4831f06d195fa5482fb101ef1d3 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 8 Nov 2024 13:10:45 -0700 Subject: [PATCH 15/41] Remove unneeded section --- .github/workflows/tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 99835089..9047c0b4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,7 +1,5 @@ name: Run tests -env: - # Trigger test workflow whenever: # 1. Commits are pushed directly to the mrt branch on: From 9fa669e9a2d08d7c7dad0f578b5bcd7d6b59c5ba Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 8 Nov 2024 13:13:21 -0700 Subject: [PATCH 16/41] Remove unneeded sections --- .github/workflows/build-artifacts.yml | 1 - .github/workflows/tests.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/.github/workflows/build-artifacts.yml b/.github/workflows/build-artifacts.yml index d53438dc..5ce0674d 100644 --- a/.github/workflows/build-artifacts.yml +++ b/.github/workflows/build-artifacts.yml @@ -59,7 +59,6 @@ on: jobs: dotnet: strategy: - matrix: fail-fast: false uses: ./.github/workflows/dotnet.yml with: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 9047c0b4..4965e426 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -27,7 +27,6 @@ jobs: build: runs-on: ubuntu-latest strategy: - matrix: fail-fast: false steps: From ff638568bd0bf9e51dc2d713a98e4ef202c609d7 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 8 Nov 2024 13:19:49 -0700 Subject: [PATCH 17/41] Add enableWindowsTargeting --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4965e426..d661a610 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -40,7 +40,7 @@ jobs: - name: Install dependencies run: dotnet add package NeoLua --version 1.3.14 - name: Build - run: dotnet 
build --no-restore + run: dotnet build --no-restore /p:EnableWindowsTargeting=true test-ee: runs-on: ubuntu-latest From 28b890d3046276cde5b36cd53d100afcd9a410de Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 8 Nov 2024 13:21:32 -0700 Subject: [PATCH 18/41] Add it to dotnet restore --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d661a610..b87037ae 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -36,7 +36,7 @@ jobs: with: dotnet-version: 6.0.x - name: Restore dependencies - run: dotnet restore + run: dotnet restore /p:EnableWindowsTargeting=true - name: Install dependencies run: dotnet add package NeoLua --version 1.3.14 - name: Build From 8cecc7cf7e7efe72bbc31bf21b54a1344b6e1361 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 8 Nov 2024 13:26:34 -0700 Subject: [PATCH 19/41] Not sure how dependencies work --- .github/workflows/tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b87037ae..4761b30d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -37,8 +37,8 @@ jobs: dotnet-version: 6.0.x - name: Restore dependencies run: dotnet restore /p:EnableWindowsTargeting=true - - name: Install dependencies - run: dotnet add package NeoLua --version 1.3.14 + #- name: Install dependencies + # run: dotnet add package NeoLua --version 1.3.14 - name: Build run: dotnet build --no-restore /p:EnableWindowsTargeting=true From b17ac5180c171e8c4b2530c7c5e53ded3486b49a Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 8 Nov 2024 13:31:41 -0700 Subject: [PATCH 20/41] Wrong file extension --- .../workflows/docker-build-context/{Dockerfile.txt => Dockerfile} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/workflows/docker-build-context/{Dockerfile.txt => Dockerfile} (100%) diff --git 
a/.github/workflows/docker-build-context/Dockerfile.txt b/.github/workflows/docker-build-context/Dockerfile similarity index 100% rename from .github/workflows/docker-build-context/Dockerfile.txt rename to .github/workflows/docker-build-context/Dockerfile From 3ac4ac967282bdca9b20cdd9ab8d2145ee13ea4c Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Mon, 11 Nov 2024 09:32:17 -0700 Subject: [PATCH 21/41] Add roster and security files --- .../workflows/docker-build-context/roster.smd | 12 +++++ .../docker-build-context/security.smd | 48 +++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 .github/workflows/docker-build-context/roster.smd create mode 100644 .github/workflows/docker-build-context/security.smd diff --git a/.github/workflows/docker-build-context/roster.smd b/.github/workflows/docker-build-context/roster.smd new file mode 100644 index 00000000..66daed5f --- /dev/null +++ b/.github/workflows/docker-build-context/roster.smd @@ -0,0 +1,12 @@ +[ + [ + 97107025374203, + 1 + ], + { + "key": "test", + "value": "a1", + "generation": 1, + "timestamp": 465602976982 + } +] diff --git a/.github/workflows/docker-build-context/security.smd b/.github/workflows/docker-build-context/security.smd new file mode 100644 index 00000000..9c530d51 --- /dev/null +++ b/.github/workflows/docker-build-context/security.smd @@ -0,0 +1,48 @@ +[ + [ + 162276881999406, + 14 + ], + { + "key": "admin|P", + "value": "$2a$10$7EqJtq98hPqEX7fNZaFWoO1mVO/4MLpGzsqojz6E9Gef6iXDjXdDa", + "generation": 1, + "timestamp": 0 + }, + { + "key": "admin|R|user-admin", + "value": "", + "generation": 1, + "timestamp": 0 + }, + { + "key": "superuser|P", + "value": "$2a$10$7EqJtq98hPqEX7fNZaFWoOZX0o4mZCBUwvzt/iecIcG4JaDOC41zK", + "generation": 3, + "timestamp": 458774922440 + }, + { + "key": "superuser|R|read-write-udf", + "value": "", + "generation": 3, + "timestamp": 458774922441 + }, + { + "key": "superuser|R|sys-admin", + "value": "", + "generation": 3, + "timestamp": 458774922442 + 
}, + { + "key": "superuser|R|user-admin", + "value": "", + "generation": 3, + "timestamp": 458774922442 + }, + { + "key": "superuser|R|data-admin", + "value": null, + "generation": 2, + "timestamp": 458774718056 + } +] From 73774895a38b6a93c8860840258c272cb2430fd9 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Mon, 11 Nov 2024 09:52:45 -0700 Subject: [PATCH 22/41] Add wait for server to start action --- .../wait-for-as-server-to-start/action.yml | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/actions/wait-for-as-server-to-start/action.yml diff --git a/.github/actions/wait-for-as-server-to-start/action.yml b/.github/actions/wait-for-as-server-to-start/action.yml new file mode 100644 index 00000000..373c2697 --- /dev/null +++ b/.github/actions/wait-for-as-server-to-start/action.yml @@ -0,0 +1,28 @@ +name: 'Wait for Aerospike server to start' +description: Only tested on Linux and macOS +inputs: + container-name: + required: true + is-security-enabled: + required: false + default: 'false' + is-strong-consistency-enabled: + required: false + default: 'false' + +runs: + using: "composite" + steps: + - name: 'macOS: install timeout command' + if: ${{ runner.os == 'macOS' }} + run: brew install coreutils + shell: bash + + # Composite actions doesn't support step-level timeout-minutes + # Use timeout command and store polling logic in file to make it easier to read + # Call bash shell explicitly since timeout uses "sh" shell by default, for some reason + # Also, we don't want to fail if we timeout in case the server *did* finish starting up but the script couldn't detect it due to a bug + # Effectively, this composite action is like calling "sleep" that is optimized to exit early when it detects an ok from the server + - name: Wait for EE server to start + run: timeout 30 bash ./.github/workflows/wait-for-as-server-to-start.bash ${{ inputs.container-name }} ${{ inputs.is-security-enabled }} ${{ inputs.is-strong-consistency-enabled 
}} || true + shell: bash From ba57fc0a014d42b66ee4bc29a71f77b3be88a6da Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Mon, 11 Nov 2024 11:39:44 -0700 Subject: [PATCH 23/41] Remove ASADM_AUTH_FLAGS --- .github/actions/run-ee-server/action.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml index 050d9ff9..3476465f 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -64,17 +64,6 @@ runs: is-security-enabled: true is-strong-consistency-enabled: true - - run: echo ASADM_AUTH_FLAGS="--user=${{ env.SUPERUSER_NAME_AND_PASSWORD }} --password=${{ env.SUPERUSER_NAME_AND_PASSWORD }}" >> $GITHUB_ENV - shell: bash - - # All the partitions are assumed to be dead when reusing a roster file - - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm $ASADM_AUTH_FLAGS --enable --execute "manage revive ns test" - shell: bash - - # Apply changes - - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm $ASADM_AUTH_FLAGS --enable --execute "manage recluster" - shell: bash - # For debugging - run: docker logs aerospike shell: bash From cbdf39a93a4c64f1d93b759f382b6b81225663d0 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Mon, 11 Nov 2024 14:20:48 -0700 Subject: [PATCH 24/41] Verify that MRT state is OPEN when running commands in that MRT --- AerospikeClient/Async/AsyncClient.cs | 34 ++++++++--------- AerospikeClient/Command/TxnMonitor.cs | 5 +++ AerospikeClient/Main/AerospikeClient.cs | 49 ++++++------------------- AerospikeClient/Main/Txn.cs | 48 ++++++++++++++++++++++-- AerospikeTest/AerospikeTest.csproj | 2 +- AerospikeTest/Async/TestAsyncTxn.cs | 6 +-- AerospikeTest/Sync/Basic/TestTxn.cs | 2 +- 7 files changed, 83 insertions(+), 63 deletions(-) diff --git a/AerospikeClient/Async/AsyncClient.cs b/AerospikeClient/Async/AsyncClient.cs index ef784d1a..51eaa9f7 100644 --- a/AerospikeClient/Async/AsyncClient.cs +++ 
b/AerospikeClient/Async/AsyncClient.cs @@ -632,7 +632,7 @@ public void Exists(Policy policy, ExistsListener listener, Key key) policy = readPolicyDefault; } - policy.Txn?.SetNamespace(key.ns); + policy.Txn?.PrepareRead(key.ns); AsyncExists async = new AsyncExists(cluster, policy, key, listener); async.Execute(); @@ -673,7 +673,7 @@ public void Exists(BatchPolicy policy, ExistsArrayListener listener, Key[] keys) { policy = batchPolicyDefault; } - policy.Txn?.SetNamespace(keys); + policy.Txn?.PrepareRead(keys); AsyncBatchExistsArrayExecutor executor = new(cluster, policy, keys, listener); executor.Execute(); @@ -699,7 +699,7 @@ public void Exists(BatchPolicy policy, ExistsSequenceListener listener, Key[] ke { policy = batchPolicyDefault; } - policy.Txn?.SetNamespace(keys); + policy.Txn?.PrepareRead(keys); AsyncBatchExistsSequenceExecutor executor = new(cluster, policy, keys, listener); executor.Execute(); @@ -740,7 +740,7 @@ public void Get(Policy policy, RecordListener listener, Key key) policy = readPolicyDefault; } - policy.Txn?.SetNamespace(key.ns); + policy.Txn?.PrepareRead(key.ns); AsyncRead async = new AsyncRead(cluster, policy, listener, key, (string[])null); async.Execute(); @@ -779,7 +779,7 @@ public void Get(Policy policy, RecordListener listener, Key key, params string[] policy = readPolicyDefault; } - policy.Txn?.SetNamespace(key.ns); + policy.Txn?.PrepareRead(key.ns); AsyncRead async = new AsyncRead(cluster, policy, listener, key, binNames); async.Execute(); @@ -816,7 +816,7 @@ public void GetHeader(Policy policy, RecordListener listener, Key key) policy = readPolicyDefault; } - policy.Txn?.SetNamespace(key.ns); + policy.Txn?.PrepareRead(key.ns); AsyncReadHeader async = new AsyncReadHeader(cluster, policy, listener, key); async.Execute(); @@ -870,7 +870,7 @@ public void Get(BatchPolicy policy, BatchListListener listener, List { policy = batchPolicyDefault; } - policy.Txn?.SetNamespace(records); + policy.Txn?.PrepareRead(records); 
AsyncBatchReadListExecutor executor = new(cluster, policy, listener, records); executor.Execute(); @@ -901,7 +901,7 @@ public void Get(BatchPolicy policy, BatchSequenceListener listener, List list = new(keys.Length); foreach (Key key in keys) @@ -89,6 +92,8 @@ public static Operation[] GetTxnOps(Txn txn, Key[] keys) public static Operation[] GetTxnOps(Txn txn, List records) { + txn.VerifyCommand(); + List list = new(records.Count); foreach (BatchRecord br in records) { diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index a08341ca..daa5e954 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -834,10 +834,7 @@ public bool Exists(Policy policy, Key key) policy = readPolicyDefault; } - if (policy.Txn != null) - { - policy.Txn.SetNamespace(key.ns); - } + policy.Txn?.PrepareRead(key.ns); ExistsCommand command = new ExistsCommand(cluster, policy, key); command.Execute(); @@ -863,10 +860,7 @@ public bool[] Exists(BatchPolicy policy, Key[] keys) policy = batchPolicyDefault; } - if (policy.Txn != null) - { - policy.Txn.SetNamespace(keys); - } + policy.Txn?.PrepareRead(keys); bool[] existsArray = new bool[keys.Length]; @@ -920,7 +914,7 @@ public Record Get(Policy policy, Key key) policy = readPolicyDefault; } - policy.Txn?.SetNamespace(key.ns); + policy.Txn?.PrepareRead(key.ns); ReadCommand command = new ReadCommand(cluster, policy, key); command.Execute(); @@ -943,10 +937,7 @@ public Record Get(Policy policy, Key key, params string[] binNames) policy = readPolicyDefault; } - if (policy.Txn != null) - { - policy.Txn.SetNamespace(key.ns); - } + policy.Txn?.PrepareRead(key.ns); ReadCommand command = new ReadCommand(cluster, policy, key, binNames); command.Execute(); @@ -968,10 +959,7 @@ public Record GetHeader(Policy policy, Key key) policy = readPolicyDefault; } - if (policy.Txn != null) - { - policy.Txn.SetNamespace(key.ns); - } + policy.Txn?.PrepareRead(key.ns); 
ReadHeaderCommand command = new ReadHeaderCommand(cluster, policy, key); command.Execute(); @@ -1005,10 +993,7 @@ public bool Get(BatchPolicy policy, List records) policy = batchPolicyDefault; } - if (policy.Txn != null) - { - policy.Txn.SetNamespace(records); - } + policy.Txn?.PrepareRead(records); BatchStatus status = new BatchStatus(true); List batchNodes = BatchNode.GenerateList(cluster, policy, records, status); @@ -1044,10 +1029,7 @@ public Record[] Get(BatchPolicy policy, Key[] keys) } - if (policy.Txn != null) - { - policy.Txn.SetNamespace(keys); - } + policy.Txn?.PrepareRead(keys); Record[] records = new Record[keys.Length]; @@ -1104,10 +1086,7 @@ public Record[] Get(BatchPolicy policy, Key[] keys, params string[] binNames) policy = batchPolicyDefault; } - if (policy.Txn != null) - { - policy.Txn.SetNamespace(keys); - } + policy.Txn?.PrepareRead(keys); Record[] records = new Record[keys.Length]; @@ -1163,10 +1142,7 @@ public Record[] Get(BatchPolicy policy, Key[] keys, params Operation[] ops) policy = batchPolicyDefault; } - if (policy.Txn != null) - { - policy.Txn.SetNamespace(keys); - } + policy.Txn?.PrepareRead(keys); Record[] records = new Record[keys.Length]; @@ -1222,10 +1198,7 @@ public Record[] GetHeader(BatchPolicy policy, Key[] keys) } - if (policy.Txn != null) - { - policy.Txn.SetNamespace(keys); - } + policy.Txn?.PrepareRead(keys); Record[] records = new Record[keys.Length]; @@ -1349,7 +1322,7 @@ public Record Operate(WritePolicy policy, Key key, params Operation[] operations { if (policy?.Txn != null) { - policy.Txn.SetNamespace(key.ns); + policy.Txn.PrepareRead(key.ns); } OperateCommandRead command = new(cluster, key, args); diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index f96eae5a..e40990c5 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -121,6 +121,48 @@ private static long CreateId() return newState * 0x2545f4914f6cdd1dL; } + /// + /// Verify current MRT state and namespace for a 
future read command. + /// + /// + internal void PrepareRead(string ns) + { + VerifyCommand(); + SetNamespace(ns); + } + + /// + /// Verify current MRT state and namespaces for a future batch read command. + /// + /// + internal void PrepareRead(Key[] keys) + { + VerifyCommand(); + SetNamespace(keys); + } + + /// + /// Verify current MRT state and namespaces for a future batch read command. + /// + /// + internal void PrepareRead(List records) + { + VerifyCommand(); + SetNamespace(records); + } + + /// + /// Verify that the MRT state allows future commands. + /// + /// + public void VerifyCommand() + { + if (State != TxnState.OPEN) + { + throw new AerospikeException("Command not allowed in current MRT state: " + State); + } + } + /// /// Process the results of a record read. For internal use only. /// @@ -188,7 +230,7 @@ public void OnWriteInDoubt(Key key) /// Set MRT namespace only if doesn't already exist. /// If namespace already exists, verify new namespace is the same. /// - public void SetNamespace(string ns) + internal void SetNamespace(string ns) { if (Ns == null) { @@ -204,7 +246,7 @@ public void SetNamespace(string ns) /// Set MRT namespaces for each key only if doesn't already exist. /// If namespace already exists, verify new namespace is the same. /// - public void SetNamespace(Key[] keys) + internal void SetNamespace(Key[] keys) { foreach (Key key in keys) { @@ -216,7 +258,7 @@ public void SetNamespace(Key[] keys) /// Set MRT namespaces for each key only if doesn't already exist. /// If namespace already exists, verify new namespace is the same. 
/// - public void SetNamespace(List records) + internal void SetNamespace(List records) { foreach (BatchRead br in records) { diff --git a/AerospikeTest/AerospikeTest.csproj b/AerospikeTest/AerospikeTest.csproj index 5e366a9c..6d519801 100644 --- a/AerospikeTest/AerospikeTest.csproj +++ b/AerospikeTest/AerospikeTest.csproj @@ -1,4 +1,4 @@ - + Debug diff --git a/AerospikeTest/Async/TestAsyncTxn.cs b/AerospikeTest/Async/TestAsyncTxn.cs index f6440029..8bc6fc5f 100644 --- a/AerospikeTest/Async/TestAsyncTxn.cs +++ b/AerospikeTest/Async/TestAsyncTxn.cs @@ -409,9 +409,9 @@ public void AsyncTxnLUTCommit() // Test Case 38 new Put(txn, key3, "val1"), // T9 new GetExpect(txn, key3, "val1", 1), // T10 new Commit(txn), // T11 - new GetExpect(txn, key1, "val11", 3), // T12 - new GetExpect(txn, key2, "val11", 3), - new GetExpect(txn, key3, "val1", 2) + new GetExpect(null, key1, "val11", 3), // T12 + new GetExpect(null, key2, "val11", 3), + new GetExpect(null, key3, "val1", 2) }; Execute(cmds); diff --git a/AerospikeTest/Sync/Basic/TestTxn.cs b/AerospikeTest/Sync/Basic/TestTxn.cs index c4188dd1..7e517139 100644 --- a/AerospikeTest/Sync/Basic/TestTxn.cs +++ b/AerospikeTest/Sync/Basic/TestTxn.cs @@ -687,7 +687,7 @@ public void TxnWriteAfterCommit() } catch (AerospikeException ae) { - if (ae.Result != ResultCode.MRT_EXPIRED) + if (!ae.Message.Contains("Command not allowed in current MRT state:")) { throw; } From 7fe7c4cfff7fb784911de98f35d7a629284eafdf Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Tue, 12 Nov 2024 09:31:33 -0700 Subject: [PATCH 25/41] Get rid of second checkout --- .github/workflows/tests.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4761b30d..50d869a6 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -46,10 +46,6 @@ jobs: runs-on: ubuntu-latest needs: build steps: - - uses: actions/checkout@v2 - with: - submodules: recursive - - uses: 
./.github/actions/run-ee-server with: use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }} From 2b441c04ca47ef8e99aeb4784002d36b27f5f6e7 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Wed, 13 Nov 2024 10:10:05 -0700 Subject: [PATCH 26/41] Combine build, setup server, and test into one job to simplify for now --- .github/workflows/tests.yml | 55 ++++++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 16 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 50d869a6..1ef603e6 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -24,13 +24,43 @@ on: required: true jobs: - build: - runs-on: ubuntu-latest - strategy: - fail-fast: false + #build: + # runs-on: ubuntu-latest + # strategy: + # fail-fast: false + # + # steps: + # - uses: actions/checkout@v4 + # - name: Setup .NET + # uses: actions/setup-dotnet@v4 + # with: + # dotnet-version: 6.0.x + # - name: Restore dependencies + # run: dotnet restore /p:EnableWindowsTargeting=true + # #- name: Install dependencies + # # run: dotnet add package NeoLua --version 1.3.14 + # - name: Build + # run: dotnet build --configuration Release --no-restore /p:EnableWindowsTargeting=true + # + # - name: Publish + # run: dotnet publish -c Release -o ../publish + # + # - name: Send files to test jobs + # uses: actions/upload-artifact@v3 + # with: + # name: AerospikeClient.dll + # path: ./AerospikeClient/bin/Debug/*.whl + test-ee: + runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v2 + + - uses: ./.github/actions/run-ee-server + with: + use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }} + docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }} + docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }} - name: Setup .NET uses: actions/setup-dotnet@v4 with: @@ -40,17 +70,10 @@ jobs: #- name: Install dependencies # run: dotnet add package 
NeoLua --version 1.3.14 - name: Build - run: dotnet build --no-restore /p:EnableWindowsTargeting=true - - test-ee: - runs-on: ubuntu-latest - needs: build - steps: - - uses: ./.github/actions/run-ee-server - with: - use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }} - docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }} - docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }} + run: dotnet build --configuration Release --no-restore /p:EnableWindowsTargeting=true + + - name: Publish + run: dotnet publish -c Release -o ../publish - name: Run tests run: dotnet test --no-build --verbosity normal From 7288858816b61822d9acdafbbd311551a225f48b Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Wed, 13 Nov 2024 10:13:43 -0700 Subject: [PATCH 27/41] Get rid of publish step --- .github/workflows/tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1ef603e6..4fc5c652 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -71,9 +71,6 @@ jobs: # run: dotnet add package NeoLua --version 1.3.14 - name: Build run: dotnet build --configuration Release --no-restore /p:EnableWindowsTargeting=true - - - name: Publish - run: dotnet publish -c Release -o ../publish - name: Run tests run: dotnet test --no-build --verbosity normal From 7bf438e21ec731772838fc845811ec3bec886b4d Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Wed, 13 Nov 2024 10:17:23 -0700 Subject: [PATCH 28/41] Specify release configuration in test step --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4fc5c652..c3621943 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -73,7 +73,7 @@ jobs: run: dotnet build --configuration Release --no-restore /p:EnableWindowsTargeting=true - name: Run tests - run: dotnet test --no-build --verbosity 
normal + run: dotnet test --configuration Release --no-build --verbosity normal - name: Show logs if failed if: ${{ failure() }} From 77f3252ada2fe33760a5f535bb464385938f77ff Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Wed, 13 Nov 2024 13:07:18 -0700 Subject: [PATCH 29/41] Disable security --- .github/actions/run-ee-server/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml index 3476465f..28c252e3 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -61,7 +61,7 @@ runs: - uses: ./.github/actions/wait-for-as-server-to-start with: container-name: ${{ env.SERVER_CONTAINER_NAME }} - is-security-enabled: true + is-security-enabled: false is-strong-consistency-enabled: true # For debugging From 0e93ee31fe8f30331d5b15008d53e129e9ca1846 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Wed, 13 Nov 2024 13:08:41 -0700 Subject: [PATCH 30/41] remove security lines from dockerfile for now --- .github/workflows/docker-build-context/Dockerfile | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.github/workflows/docker-build-context/Dockerfile b/.github/workflows/docker-build-context/Dockerfile index 9ac06a15..5f20cb51 100644 --- a/.github/workflows/docker-build-context/Dockerfile +++ b/.github/workflows/docker-build-context/Dockerfile @@ -12,16 +12,6 @@ WORKDIR /opt/aerospike/smd ARG AEROSPIKE_CONF_TEMPLATE_PATH=/etc/aerospike/aerospike.template.conf -# Not using asconfig to edit config because we are working with a template file, which may not have valid values yet -RUN echo -e "security {\n\tenable-quotas true\n}\n" >> $AEROSPIKE_CONF_TEMPLATE_PATH -# security.smd was generated manually by -# 1. Starting a new Aerospike EE server using Docker -# 2. Creating the superuser user -# 3. 
Copying /opt/aerospike/smd/security.smd from the container and committing it to this repo -# This file should always work -# TODO: generate this automatically, somehow. -COPY security.smd . - # Enable strong consistency RUN sed -i "s/\(namespace.*{\)/\1\n\tstrong-consistency true/" $AEROSPIKE_CONF_TEMPLATE_PATH RUN sed -i "s/\(namespace.*{\)/\1\n\tstrong-consistency-allow-expunge true/" $AEROSPIKE_CONF_TEMPLATE_PATH From 04a099f9d630f2b700f95fb3d6faf418d2f6848e Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Fri, 15 Nov 2024 12:13:26 -0700 Subject: [PATCH 31/41] Set use rc to true --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c3621943..99b7bcf1 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -58,7 +58,7 @@ jobs: - uses: ./.github/actions/run-ee-server with: - use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }} + use-server-rc: true docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }} docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }} - name: Setup .NET From ce65e33095217f54dd767dba0a0a71a1a2c87a64 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Mon, 18 Nov 2024 08:54:08 -0700 Subject: [PATCH 32/41] Re-add some lines that accidentally got deleted --- .github/actions/run-ee-server/action.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml index 28c252e3..f778a5e8 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -64,6 +64,14 @@ runs: is-security-enabled: false is-strong-consistency-enabled: true + # All the partitions are assumed to be dead when reusing a roster file + - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} --enable --execute "manage revive ns test" + shell: bash + + # Apply changes + - run: docker exec ${{ 
env.SERVER_CONTAINER_NAME }} --enable --execute "manage recluster" + shell: bash + # For debugging - run: docker logs aerospike shell: bash From 85d477ce909f74f85ba1116668a6b3d1192a0379 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Mon, 18 Nov 2024 09:12:38 -0700 Subject: [PATCH 33/41] The return of asadm in relevant commands --- .github/actions/run-ee-server/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml index f778a5e8..278db4b6 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -65,11 +65,11 @@ runs: is-strong-consistency-enabled: true # All the partitions are assumed to be dead when reusing a roster file - - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} --enable --execute "manage revive ns test" + - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm --enable --execute "manage revive ns test" shell: bash # Apply changes - - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} --enable --execute "manage recluster" + - run: docker exec ${{ env.SERVER_CONTAINER_NAME }} asadm --enable --execute "manage recluster" shell: bash # For debugging From fc75be10ff93c74f4264fce829e7ddf8c5cca0c6 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Mon, 18 Nov 2024 09:21:25 -0700 Subject: [PATCH 34/41] Run with a MRT build --- .github/actions/run-ee-server/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml index 278db4b6..b5246245 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -11,7 +11,7 @@ inputs: server-tag: required: true description: Specify Docker tag - default: 'latest' + default: '8.0.0.0-alpha5_1' # Github Composite Actions can't access secrets # so we need to pass them in as inputs docker-hub-username: From 
8cfec041a420fa6a3ad3df70163c0b81f337cee1 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Tue, 26 Nov 2024 12:09:47 -0700 Subject: [PATCH 35/41] Documentaiton changes, fix force single node --- .github/actions/run-ee-server/action.yml | 2 +- .github/workflows/dotnet.yml | 85 ------------------- .github/workflows/tests.yml | 31 +------ AerospikeClient/Admin/Role.cs | 2 +- AerospikeClient/Async/AsyncClient.cs | 33 ++++--- .../Async/AsyncQueryPartitionExecutor.cs | 2 - .../Async/AsyncScanPartitionExecutor.cs | 2 - AerospikeClient/Async/IAsyncClient.cs | 35 ++++++-- AerospikeClient/Cluster/NodeValidator.cs | 5 ++ AerospikeClient/Command/Batch.cs | 2 +- AerospikeClient/Command/Command.cs | 5 +- AerospikeClient/Command/ExecuteCommand.cs | 2 +- AerospikeClient/Command/OperateArgs.cs | 2 +- AerospikeClient/Command/ReadCommand.cs | 1 + AerospikeClient/Listener/CommitListener.cs | 2 +- AerospikeClient/Main/AerospikeClient.cs | 54 +++++++----- AerospikeClient/Main/AerospikeException.cs | 8 +- AerospikeClient/Main/BatchRecord.cs | 2 +- AerospikeClient/Main/IAerospikeClient.cs | 39 ++++++--- AerospikeClient/Main/ResultCode.cs | 9 ++ AerospikeClient/Main/Txn.cs | 26 ++++-- AerospikeClient/Policy/BatchDeletePolicy.cs | 2 +- AerospikeClient/Policy/BatchPolicy.cs | 2 +- AerospikeClient/Policy/BatchWritePolicy.cs | 2 +- AerospikeClient/Policy/CommitLevel.cs | 2 +- AerospikeClient/Query/RecordSet.cs | 2 +- AerospikeClient/Query/ResultSet.cs | 2 +- 27 files changed, 159 insertions(+), 202 deletions(-) delete mode 100644 .github/workflows/dotnet.yml diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml index b5246245..b014669b 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -42,7 +42,7 @@ runs: - name: Build and push uses: docker/build-push-action@v6 with: - # Don't want to use default Git context or else it will clone the whole Python client repo again + # Don't want to use default Git 
context or else it will clone the whole client repo again context: .github/workflows/docker-build-context build-args: | server_image=${{ env.IMAGE_NAME }} diff --git a/.github/workflows/dotnet.yml b/.github/workflows/dotnet.yml deleted file mode 100644 index ae5eb48c..00000000 --- a/.github/workflows/dotnet.yml +++ /dev/null @@ -1,85 +0,0 @@ -# This workflow will build a .NET project -# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-net - -name: .NET - -on: - workflow_dispatch: - inputs: - run_tests: - description: 'Run Aerospike server and run tests?' - type: boolean - required: false - default: false - use-server-rc: - type: boolean - required: true - default: false - description: 'Test against server release candidate?' - server-tag: - required: true - default: 'latest' - description: 'Server docker image tag' - - workflow_call: - inputs: - # See workflow call hack in update-version.yml - is_workflow_call: - type: boolean - default: true - required: false - # Only used in workflow_call event - sha-to-build-and-test: - type: string - required: true - unoptimized: - type: boolean - required: false - default: false - run_tests: - type: boolean - required: false - default: false - use-server-rc: - required: false - type: boolean - default: false - description: 'Test against server release candidate?' - server-tag: - required: false - type: string - default: 'latest' - description: 'Server docker image tag' - secrets: - # Just make all the secrets required to make things simpler... 
- DOCKER_HUB_BOT_USERNAME: - required: true - DOCKER_HUB_BOT_PW: - required: true - MAC_M1_SELF_HOSTED_RUNNER_PW: - required: true - -env: - COMMIT_SHA_TO_BUILD_AND_TEST: ${{ inputs.is_workflow_call == true && inputs.sha-to-build-and-test || github.sha }} - # Note that environment variables in Github are all strings - # Github mac m1 and windows runners don't support Docker / nested virtualization - # so we need to use self-hosted runners to test wheels for these platforms - RUN_INTEGRATION_TESTS_IN_CIBW: ${{ inputs.run_tests && (startsWith(inputs.platform-tag, 'manylinux') || inputs.platform-tag == 'macosx_x86_64') }} - -jobs: - build: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - name: Setup .NET - uses: actions/setup-dotnet@v4 - with: - dotnet-version: 6.0.x - - name: Restore dependencies - run: dotnet restore - - name: Build - run: dotnet build --no-restore - - name: Test - run: dotnet test --no-build --verbosity normal \ No newline at end of file diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 99b7bcf1..03d3209a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -24,32 +24,6 @@ on: required: true jobs: - #build: - # runs-on: ubuntu-latest - # strategy: - # fail-fast: false - # - # steps: - # - uses: actions/checkout@v4 - # - name: Setup .NET - # uses: actions/setup-dotnet@v4 - # with: - # dotnet-version: 6.0.x - # - name: Restore dependencies - # run: dotnet restore /p:EnableWindowsTargeting=true - # #- name: Install dependencies - # # run: dotnet add package NeoLua --version 1.3.14 - # - name: Build - # run: dotnet build --configuration Release --no-restore /p:EnableWindowsTargeting=true - # - # - name: Publish - # run: dotnet publish -c Release -o ../publish - # - # - name: Send files to test jobs - # uses: actions/upload-artifact@v3 - # with: - # name: AerospikeClient.dll - # path: ./AerospikeClient/bin/Debug/*.whl test-ee: runs-on: ubuntu-latest @@ -61,14 +35,15 @@ jobs: 
use-server-rc: true docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }} docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }} + - name: Setup .NET uses: actions/setup-dotnet@v4 with: dotnet-version: 6.0.x + - name: Restore dependencies run: dotnet restore /p:EnableWindowsTargeting=true - #- name: Install dependencies - # run: dotnet add package NeoLua --version 1.3.14 + - name: Build run: dotnet build --configuration Release --no-restore /p:EnableWindowsTargeting=true diff --git a/AerospikeClient/Admin/Role.cs b/AerospikeClient/Admin/Role.cs index c94616d0..2b8af1aa 100644 --- a/AerospikeClient/Admin/Role.cs +++ b/AerospikeClient/Admin/Role.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. diff --git a/AerospikeClient/Async/AsyncClient.cs b/AerospikeClient/Async/AsyncClient.cs index 51eaa9f7..48f3469a 100644 --- a/AerospikeClient/Async/AsyncClient.cs +++ b/AerospikeClient/Async/AsyncClient.cs @@ -134,6 +134,19 @@ public AsyncClient(AsyncClientPolicy policy, params Host[] hosts) // Multi-Record Transactions //------------------------------------------------------- + /// + /// Asynchronously attempt to commit the given multi-record transaction. + /// Create listener, call asynchronous commit and return task monitor. + /// + /// multi-record transaction + /// cancellation token + public Task Commit(Txn txn, CancellationToken token) + { + var listener = new CommitListenerAdapter(token); + Commit(listener, txn); + return listener.Task; + } + /// /// Asynchronously attempt to commit the given multi-record transaction. First, the expected /// record versions are sent to the server nodes for verification. If all nodes return success, @@ -177,22 +190,23 @@ public void Commit(CommitListener listener, Txn txn) } /// - /// Asynchronously attempt to commit the given multi-record transaction. 
- /// Create listener, call asynchronous put and return task monitor. + /// Asynchronously attempt to abort and rollback the given multi-record transaction. + /// Create listener, call asynchronous commit and return task monitor. /// /// multi-record transaction /// cancellation token - public Task Commit(Txn txn, CancellationToken token) + public Task Abort(Txn txn, CancellationToken token) { - var listener = new CommitListenerAdapter(token); - Commit(listener, txn); + var listener = new AbortListenerAdapter(token); + Abort(listener, txn); return listener.Task; } + /// /// Asynchronously abort and rollback the given multi-record transaction. /// - /// Schedules the commit command with a channel selector and return. + /// Schedules the abort command with a channel selector and return. /// Another thread will process the command and send the results to the listener. /// /// Requires server version 8.0+ @@ -222,13 +236,6 @@ public void Abort(AbortListener listener, Txn txn) } } - public Task Abort(Txn txn, CancellationToken token) - { - var listener = new AbortListenerAdapter(token); - Abort(listener, txn); - return listener.Task; - } - //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- diff --git a/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs b/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs index f308117a..68942fd1 100644 --- a/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs +++ b/AerospikeClient/Async/AsyncQueryPartitionExecutor.cs @@ -14,8 +14,6 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -using System; -using System.Collections.Generic; namespace Aerospike.Client { diff --git a/AerospikeClient/Async/AsyncScanPartitionExecutor.cs b/AerospikeClient/Async/AsyncScanPartitionExecutor.cs index 20cbc5c0..704e25d8 100644 --- a/AerospikeClient/Async/AsyncScanPartitionExecutor.cs +++ b/AerospikeClient/Async/AsyncScanPartitionExecutor.cs @@ -14,8 +14,6 @@ * License for the specific language governing permissions and limitations under * the License. */ -using System; -using System.Collections.Generic; namespace Aerospike.Client { diff --git a/AerospikeClient/Async/IAsyncClient.cs b/AerospikeClient/Async/IAsyncClient.cs index 47a709d6..ccbbb017 100644 --- a/AerospikeClient/Async/IAsyncClient.cs +++ b/AerospikeClient/Async/IAsyncClient.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -41,14 +41,23 @@ public interface IAsyncClient : IAerospikeClient // Multi-Record Transactions //------------------------------------------------------- + /// + /// Asynchronously attempt to commit the given multi-record transaction. + /// Create listener, call asynchronous commit and return task monitor. + /// + /// multi-record transaction + /// cancellation token + public Task Commit(Txn txn, CancellationToken token); + /// /// Asynchronously attempt to commit the given multi-record transaction. First, the expected - /// record versions are sent to the server nodes for verification.If all nodes return success, + /// record versions are sent to the server nodes for verification. If all nodes return success, /// the transaction is committed. Otherwise, the transaction is aborted. /// - /// This method registers the command with an event loop and returns. - /// The event loop thread will process the command and send the results to the listener. - /// + /// Schedules the commit command with a channel selector and return. 
+ /// Another thread will process the command and send the results to the listener. + /// + /// /// Requires server version 8.0+ /// /// @@ -56,17 +65,25 @@ public interface IAsyncClient : IAerospikeClient /// multi-record transaction void Commit(CommitListener listener, Txn txn); + /// + /// Asynchronously attempt to abort and rollback the given multi-record transaction. + /// Create listener, call asynchronous commit and return task monitor. + /// + /// multi-record transaction + /// cancellation token + public Task Abort(Txn txn, CancellationToken token); + /// /// Asynchronously abort and rollback the given multi-record transaction. /// - /// This method registers the command with an event loop and returns. - /// The event loop thread will process the command and send the results to the listener. + /// Schedules the abort command with a channel selector and return. + /// Another thread will process the command and send the results to the listener. /// /// Requires server version 8.0+ /// /// - /// - /// + /// where to send results + /// multi-record transaction void Abort(AbortListener listener, Txn txn); //------------------------------------------------------- diff --git a/AerospikeClient/Cluster/NodeValidator.cs b/AerospikeClient/Cluster/NodeValidator.cs index cf398ca7..d5599f61 100644 --- a/AerospikeClient/Cluster/NodeValidator.cs +++ b/AerospikeClient/Cluster/NodeValidator.cs @@ -100,6 +100,11 @@ public Node SeedNode(Cluster cluster, Host host, Peers peers) private bool ValidatePeers(Peers peers, Node node) { + if (peers == null) + { + return true; + } + try { peers.refreshCount = 0; diff --git a/AerospikeClient/Command/Batch.cs b/AerospikeClient/Command/Batch.cs index 16a1f68c..ac7d7b0a 100644 --- a/AerospikeClient/Command/Batch.cs +++ b/AerospikeClient/Command/Batch.cs @@ -536,7 +536,7 @@ protected internal override bool ParseRow() BatchRecord record = records[batchIndex]; - if (resultCode == 0) + if (resultCode == ResultCode.OK) { record.resultCode = 
resultCode; } diff --git a/AerospikeClient/Command/Command.cs b/AerospikeClient/Command/Command.cs index 1604955a..7c38a9e8 100644 --- a/AerospikeClient/Command/Command.cs +++ b/AerospikeClient/Command/Command.cs @@ -316,7 +316,7 @@ public void SetTxnMarkRollForward(Key key) Begin(); int fieldCount = EstimateKeySize(key); EstimateOperationSize(bin); - SizeBuffer(); + //SizeBuffer(); WriteTxnMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1); WriteOperation(bin, Operation.Type.WRITE); End(); @@ -462,7 +462,7 @@ public void SetTxnClose(Txn txn, Key key) { Begin(); int fieldCount = EstimateKeySize(key); - SizeBuffer(); + //SizeBuffer(); WriteTxnMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE, fieldCount, 0); End(); @@ -470,6 +470,7 @@ public void SetTxnClose(Txn txn, Key key) private void WriteTxnMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) { + SizeBuffer(); dataOffset += 8; dataBuffer[dataOffset++] = MSG_REMAINING_HEADER_SIZE; dataBuffer[dataOffset++] = (byte)readAttr; diff --git a/AerospikeClient/Command/ExecuteCommand.cs b/AerospikeClient/Command/ExecuteCommand.cs index da0db3ea..6637d24a 100644 --- a/AerospikeClient/Command/ExecuteCommand.cs +++ b/AerospikeClient/Command/ExecuteCommand.cs @@ -24,7 +24,7 @@ public sealed class ExecuteCommand : SyncWriteCommand private readonly string packageName; private readonly string functionName; private readonly Value[] args; - public Record Record { get; private set; } + public Record Record { get; private set; } public ExecuteCommand ( diff --git a/AerospikeClient/Command/OperateArgs.cs b/AerospikeClient/Command/OperateArgs.cs index 8f983036..bdf42800 100644 --- a/AerospikeClient/Command/OperateArgs.cs +++ b/AerospikeClient/Command/OperateArgs.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. 
diff --git a/AerospikeClient/Command/ReadCommand.cs b/AerospikeClient/Command/ReadCommand.cs index 891e6d0c..28d40570 100644 --- a/AerospikeClient/Command/ReadCommand.cs +++ b/AerospikeClient/Command/ReadCommand.cs @@ -53,6 +53,7 @@ protected internal override void ParseResult(Connection conn) { ParseHeader(conn); ParseFields(policy.Txn, key, false); + if (resultCode == ResultCode.OK) { this.record = policy.recordParser.ParseRecord(dataBuffer, ref dataOffset, opCount, generation, expiration, isOperation); diff --git a/AerospikeClient/Listener/CommitListener.cs b/AerospikeClient/Listener/CommitListener.cs index fdca6e2b..2629c7de 100644 --- a/AerospikeClient/Listener/CommitListener.cs +++ b/AerospikeClient/Listener/CommitListener.cs @@ -24,7 +24,7 @@ namespace Aerospike.Client public interface CommitListener { /// - /// This method is called when the records are verified and the commit succeeds. + /// This method is called when the records are verified and the commit succeeded or will succeed. /// void OnSuccess(CommitStatusType status); diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index daa5e954..ad09ad8d 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -90,7 +90,7 @@ public class AerospikeClient : IDisposable, IAerospikeClient protected BatchDeletePolicy batchDeletePolicyDefault; /// - /// Default user defined function policy used in batch UDF excecute commands. + /// Default user defined function policy used in batch UDF execute commands. /// protected BatchUDFPolicy batchUDFPolicyDefault; @@ -256,6 +256,7 @@ protected internal AerospikeClient(ClientPolicy policy) /// /// Default read policy that is used when read command policy is null. + /// Get returns a copy of the read policy default to avoid problems if this shared instance is later modified. 
/// public Policy ReadPolicyDefault { @@ -265,6 +266,7 @@ public Policy ReadPolicyDefault /// /// Default write policy that is used when write command policy is null. + /// Get returns a copy of the write policy default to avoid problems if this shared instance is later modified. /// public WritePolicy WritePolicyDefault { @@ -274,6 +276,7 @@ public WritePolicy WritePolicyDefault /// /// Default scan policy that is used when scan command policy is null. + /// Get returns a copy of the scan policy default to avoid problems if this shared instance is later modified. /// public ScanPolicy ScanPolicyDefault { @@ -283,6 +286,7 @@ public ScanPolicy ScanPolicyDefault /// /// Default query policy that is used when query command policy is null. + /// Get returns a copy of the query policy default to avoid problems if this shared instance is later modified. /// public QueryPolicy QueryPolicyDefault { @@ -291,8 +295,9 @@ public QueryPolicy QueryPolicyDefault } /// - /// Default parent policy used in batch read commands.Parent policy fields + /// Default parent policy used in batch read commands. Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... + /// Get returns a copy of the batch header read policy default to avoid problems if this shared instance is later modified. /// public BatchPolicy BatchPolicyDefault { @@ -303,6 +308,7 @@ public BatchPolicy BatchPolicyDefault /// /// Default parent policy used in batch write commands. Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... + /// Get returns a copy of the batch header write policy default to avoid problems if this shared instance is later modified. /// public BatchPolicy BatchParentPolicyWriteDefault { @@ -313,6 +319,7 @@ public BatchPolicy BatchParentPolicyWriteDefault /// /// Default write policy used in batch operate commands. /// Write policy fields include generation, expiration, durableDelete, etc... 
+ /// Get returns a copy of the batch detail write policy default to avoid problems if this shared instance is later modified. /// public BatchWritePolicy BatchWritePolicyDefault { @@ -322,6 +329,7 @@ public BatchWritePolicy BatchWritePolicyDefault /// /// Default delete policy used in batch delete commands. + /// Get returns a copy of the batch detail delete policy default to avoid problems if this shared instance is later modified. /// public BatchDeletePolicy BatchDeletePolicyDefault { @@ -331,6 +339,7 @@ public BatchDeletePolicy BatchDeletePolicyDefault /// /// Default user defined function policy used in batch UDF excecute commands. + /// Get returns a copy of the batch detail UDF policy default to avoid problems if this shared instance is later modified. /// public BatchUDFPolicy BatchUDFPolicyDefault { @@ -338,8 +347,19 @@ public BatchUDFPolicy BatchUDFPolicyDefault set { batchUDFPolicyDefault = value; } } + /// + /// Default info policy that is used when info command policy is null. + /// Get returns a copy of the info command policy default to avoid problems if this shared instance is later modified. + /// + public InfoPolicy InfoPolicyDefault + { + get { return new InfoPolicy(infoPolicyDefault); } + set { infoPolicyDefault = value; } + } + /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// Get returns a copy of the txn verify policy default. /// public TxnVerifyPolicy TxnVerifyPolicyDefault { @@ -350,6 +370,7 @@ public TxnVerifyPolicy TxnVerifyPolicyDefault /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) /// or back(abort) in a batch. + /// Get returns a copy of the txn roll policy default. /// public TxnRollPolicy TxnRollPolicyDefault { @@ -357,15 +378,6 @@ public TxnRollPolicy TxnRollPolicyDefault set { txnRollPolicyDefault = value; } } - /// - /// Default info policy that is used when info command policy is null. 
- /// - public InfoPolicy InfoPolicyDefault - { - get { return infoPolicyDefault; } - set { infoPolicyDefault = value; } - } - //------------------------------------------------------- // Cluster Connection Management //------------------------------------------------------- @@ -467,11 +479,13 @@ public ClusterStats GetClusterStats() /// Attempt to commit the given multi-record transaction. First, the expected record versions are /// sent to the server nodes for verification. If all nodes return success, the transaction is /// committed. Otherwise, the transaction is aborted. - ///

    + /// /// Requires server version 8.0+ - ///

    + ///
    ///
    /// multi-record transaction + /// status of the commit on success + /// if verify commit fails public CommitStatus.CommitStatusType Commit(Txn txn) { TxnRoll tr = new(cluster, txn); @@ -496,11 +510,12 @@ public CommitStatus.CommitStatusType Commit(Txn txn) /// /// Abort and rollback the given multi-record transaction. - ///

    + /// /// Requires server version 8.0+ - ///

    + /// ///
    /// multi-record transaction + /// status of the abort public AbortStatus.AbortStatusType Abort(Txn txn) { TxnRoll tr = new(cluster, txn); @@ -1028,10 +1043,8 @@ public Record[] Get(BatchPolicy policy, Key[] keys) policy = batchPolicyDefault; } - policy.Txn?.PrepareRead(keys); - Record[] records = new Record[keys.Length]; try @@ -1197,10 +1210,8 @@ public Record[] GetHeader(BatchPolicy policy, Key[] keys) policy = batchPolicyDefault; } - policy.Txn?.PrepareRead(keys); - Record[] records = new Record[keys.Length]; try @@ -1304,11 +1315,10 @@ public Record Join(BatchPolicy policy, Key key, params Join[] joins) public Record Operate(WritePolicy policy, Key key, params Operation[] operations) { OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations); - + policy = args.writePolicy; + if (args.hasWrite) { - policy = args.writePolicy; - if (policy.Txn != null) { TxnMonitor.AddKey(cluster, policy, key); diff --git a/AerospikeClient/Main/AerospikeException.cs b/AerospikeClient/Main/AerospikeException.cs index 27805b63..ed73da91 100644 --- a/AerospikeClient/Main/AerospikeException.cs +++ b/AerospikeClient/Main/AerospikeException.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. 
@@ -16,8 +16,6 @@ */ using System.Collections.ObjectModel; using System.Text; -using static Aerospike.Client.AbortStatus; -using static Aerospike.Client.AerospikeException; using static Aerospike.Client.CommitError; namespace Aerospike.Client @@ -498,7 +496,7 @@ public BatchRecordArray(BatchRecord[] records, Exception e) this.records = records; } - public BatchRecordArray(BatchRecord[] records, String message, Exception e) + public BatchRecordArray(BatchRecord[] records, string message, Exception e) : base(ResultCode.BATCH_FAILED, message, e) { this.records = records; @@ -578,7 +576,7 @@ public Backoff(int resultCode) : base(resultCode) } /// - /// Exception thrown when fails. + /// Exception thrown when fails. /// Commit Exception has similar behavior to AggregateException. /// might be populated if mutliple exceptions contribute to the failure. /// diff --git a/AerospikeClient/Main/BatchRecord.cs b/AerospikeClient/Main/BatchRecord.cs index 9196404b..3d8a31d5 100644 --- a/AerospikeClient/Main/BatchRecord.cs +++ b/AerospikeClient/Main/BatchRecord.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. diff --git a/AerospikeClient/Main/IAerospikeClient.cs b/AerospikeClient/Main/IAerospikeClient.cs index c9c6ee2f..5a825919 100644 --- a/AerospikeClient/Main/IAerospikeClient.cs +++ b/AerospikeClient/Main/IAerospikeClient.cs @@ -26,68 +26,80 @@ public interface IAerospikeClient /// /// Default read policy that is used when read command policy is null. + /// Get returns a copy of the read policy default to avoid problems if this shared instance is later modified. /// Policy ReadPolicyDefault { get; set; } /// /// Default write policy that is used when write command policy is null. + /// Get returns a copy of the write policy default to avoid problems if this shared instance is later modified. 
/// WritePolicy WritePolicyDefault { get; set; } /// /// Default scan policy that is used when scan command policy is null. + /// Get returns a copy of the scan policy default to avoid problems if this shared instance is later modified. /// ScanPolicy ScanPolicyDefault { get; set; } /// /// Default query policy that is used when query command policy is null. + /// Get returns a copy of the query policy default to avoid problems if this shared instance is later modified. /// QueryPolicy QueryPolicyDefault { get; set; } /// /// Default parent policy used in batch read commands.Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... + /// Get returns a copy of the batch header read policy default to avoid problems if this shared instance is later modified. /// BatchPolicy BatchPolicyDefault { get; set; } /// /// Default parent policy used in batch write commands. Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... + /// Get returns a copy of the batch header write policy default to avoid problems if this shared instance is later modified. /// BatchPolicy BatchParentPolicyWriteDefault { get; set; } /// /// Default write policy used in batch operate commands. /// Write policy fields include generation, expiration, durableDelete, etc... + /// Get returns a copy of the batch detail write policy default to avoid problems if this shared instance is later modified. /// BatchWritePolicy BatchWritePolicyDefault { get; set; } /// /// Default delete policy used in batch delete commands. + /// Get returns a copy of the batch detail delete policy default to avoid problems if this shared instance is later modified. /// BatchDeletePolicy BatchDeletePolicyDefault { get; set; } /// - /// Default user defined function policy used in batch UDF excecute commands. + /// Default user defined function policy used in batch UDF execute commands. 
+ /// Get returns a copy of the batch detail UDF policy default to avoid problems if this shared instance is later modified. /// BatchUDFPolicy BatchUDFPolicyDefault { get; set; } + /// + /// Default info policy that is used when info command policy is null. + /// Get returns a copy of the info command policy default to avoid problems if this shared instance is later modified. + /// + InfoPolicy InfoPolicyDefault { get; set; } + /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. + /// Get returns a copy of the txn verify policy default. /// TxnVerifyPolicy TxnVerifyPolicyDefault { get; set; } /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) - /// or back(abort) in a batch. + /// or back(abort) in a batch. + /// Get returns a copy of the txn roll policy default. /// TxnRollPolicy TxnRollPolicyDefault { get; set; } - /// - /// Default info policy that is used when info command policy is null. - /// - InfoPolicy InfoPolicyDefault { get; set; } - //------------------------------------------------------- // Cluster Connection Management //------------------------------------------------------- @@ -134,22 +146,25 @@ public interface IAerospikeClient /// /// Attempt to commit the given multi-record transaction. First, the expected record versions are - /// sent to the server nodes for verification.If all nodes return success, the command is + /// sent to the server nodes for verification. If all nodes return success, the transaction is /// committed. Otherwise, the transaction is aborted. - ///

    + /// /// Requires server version 8.0+ - ///

    + /// ///
    /// multi-record transaction + /// status of the commit on success + /// if verify commit fails CommitStatus.CommitStatusType Commit(Txn txn); /// /// Abort and rollback the given multi-record transaction. - ///

    + /// /// Requires server version 8.0+ - ///

    + /// ///
    /// multi-record transaction + /// status of the abort AbortStatus.AbortStatusType Abort(Txn txn); //------------------------------------------------------- diff --git a/AerospikeClient/Main/ResultCode.cs index bf060407..d66ab3cf 100644 --- a/AerospikeClient/Main/ResultCode.cs +++ b/AerospikeClient/Main/ResultCode.cs @@ -319,6 +319,12 @@ public sealed class ResultCode ///
    public const int MRT_ABORTED = 34; + /// + /// MRT write command limit (4096) exceeded. + /// Value: 35 + /// + public const int MRT_TOO_MANY_WRITES = 35; + /// /// There are no more records left for query. /// Value: 50 @@ -732,6 +738,9 @@ public static string GetResultString(int resultCode) case MRT_ABORTED: return "MRT already aborted"; + case MRT_TOO_MANY_WRITES: + return "MRT write command limit exceeded"; + case QUERY_END: return "Query end"; diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index e40990c5..e01d0b76 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -51,11 +51,15 @@ public enum TxnState /// is used to avoid client/server clock skew issues. For internal use only. /// internal int Deadline { get; set; } - + /// /// MRT timeout in seconds. The timer starts when the MRT monitor record is created. /// This occurs when the first command in the MRT is executed. If the timeout is reached before /// a commit or abort is called, the server will expire and rollback the MRT. + /// + /// If the MRT timeout is zero, the server configuration mrt-duration is used. + /// The default mrt-duration is 10 seconds. + /// /// public int Timeout { get; set; } @@ -65,21 +69,27 @@ public enum TxnState /// /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with - /// default capacities. The default MRT timeout is 10 seconds. + /// default capacities. + /// + /// The default client MRT timeout is zero.This means use the server configuration mrt-duration + /// as the MRT timeout. The default mrt-duration is 10 seconds. + /// /// public Txn() { Id = CreateId(); Reads = new ConcurrentHashMap(); Writes = new ConcurrentHashSet(); - Deadline = 0; State = TxnState.OPEN; - Timeout = 10; // seconds } /// /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with - /// given capacities. The default MRT timeout is 10 seconds. + /// given capacities. 
+ /// + /// The default client MRT timeout is zero.This means use the server configuration mrt-duration + /// as the MRT timeout. The default mrt-duration is 10 seconds. + /// /// /// expected number of record reads in the MRT. Minimum value is 16. /// expected number of record writes in the MRT. Minimum value is 16. @@ -98,9 +108,7 @@ public Txn(int readsCapacity, int writesCapacity) Id = CreateId(); Reads = new ConcurrentHashMap(readsCapacity); Writes = new ConcurrentHashSet(writesCapacity); - Deadline = 0; State = TxnState.OPEN; - Timeout = 10; // seconds } [System.Runtime.CompilerServices.MethodImpl(System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)] @@ -199,7 +207,7 @@ internal void OnRead(Key key, long? version) /// /// /// - public void OnWrite(Key key, long? version, int resultCode) + internal void OnWrite(Key key, long? version, int resultCode) { // Write commands set namespace prior to sending the command, so there is // no need to call it here when receiving the response. @@ -220,7 +228,7 @@ public void OnWrite(Key key, long? version, int resultCode) /// /// Add key to write hash when write command is in doubt (usually caused by timeout). /// - public void OnWriteInDoubt(Key key) + internal void OnWriteInDoubt(Key key) { Reads.Remove(key); Writes.Add(key); diff --git a/AerospikeClient/Policy/BatchDeletePolicy.cs b/AerospikeClient/Policy/BatchDeletePolicy.cs index 9d99775a..d8d28978 100644 --- a/AerospikeClient/Policy/BatchDeletePolicy.cs +++ b/AerospikeClient/Policy/BatchDeletePolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. 
diff --git a/AerospikeClient/Policy/BatchPolicy.cs b/AerospikeClient/Policy/BatchPolicy.cs index 79a039f9..31860cdd 100644 --- a/AerospikeClient/Policy/BatchPolicy.cs +++ b/AerospikeClient/Policy/BatchPolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. diff --git a/AerospikeClient/Policy/BatchWritePolicy.cs b/AerospikeClient/Policy/BatchWritePolicy.cs index 7618cc04..78932fc5 100644 --- a/AerospikeClient/Policy/BatchWritePolicy.cs +++ b/AerospikeClient/Policy/BatchWritePolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. diff --git a/AerospikeClient/Policy/CommitLevel.cs b/AerospikeClient/Policy/CommitLevel.cs index aa4f5240..9cbbb11d 100644 --- a/AerospikeClient/Policy/CommitLevel.cs +++ b/AerospikeClient/Policy/CommitLevel.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2018 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. diff --git a/AerospikeClient/Query/RecordSet.cs b/AerospikeClient/Query/RecordSet.cs index 26a172ae..03e69a78 100644 --- a/AerospikeClient/Query/RecordSet.cs +++ b/AerospikeClient/Query/RecordSet.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. diff --git a/AerospikeClient/Query/ResultSet.cs b/AerospikeClient/Query/ResultSet.cs index a80481db..37ee4450 100644 --- a/AerospikeClient/Query/ResultSet.cs +++ b/AerospikeClient/Query/ResultSet.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2019 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. 
under one or more contributor * license agreements. From d31e0bdc84256cbe2048a7bb181ce8ad76cd0fef Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Tue, 3 Dec 2024 14:25:16 -0700 Subject: [PATCH 36/41] Add Clone method to policies, changes to result codes for MRT --- AerospikeClient/Async/AsyncTxnAddKeys.cs | 5 - AerospikeClient/Async/AsyncTxnRoll.cs | 4 +- AerospikeClient/Command/TxnAddKeys.cs | 5 - AerospikeClient/Command/TxnRoll.cs | 6 +- AerospikeClient/Main/AerospikeClient.cs | 60 ++++++----- AerospikeClient/Main/AerospikeException.cs | 2 +- AerospikeClient/Main/IAerospikeClient.cs | 36 ++++--- AerospikeClient/Main/ResultCode.cs | 112 ++++++++++---------- AerospikeClient/Main/Txn.cs | 22 ++-- AerospikeClient/Policy/AdminPolicy.cs | 11 +- AerospikeClient/Policy/BatchDeletePolicy.cs | 9 ++ AerospikeClient/Policy/BatchPolicy.cs | 9 ++ AerospikeClient/Policy/BatchReadPolicy.cs | 9 ++ AerospikeClient/Policy/BatchUDFPolicy.cs | 9 ++ AerospikeClient/Policy/BatchWritePolicy.cs | 9 ++ AerospikeClient/Policy/ClientPolicy.cs | 9 ++ AerospikeClient/Policy/InfoPolicy.cs | 11 +- AerospikeClient/Policy/Policy.cs | 9 ++ AerospikeClient/Policy/QueryPolicy.cs | 9 ++ AerospikeClient/Policy/ScanPolicy.cs | 9 ++ AerospikeClient/Policy/TlsPolicy.cs | 9 ++ AerospikeClient/Policy/TxnRollPolicy.cs | 9 ++ AerospikeClient/Policy/TxnVerifyPolicy.cs | 9 ++ AerospikeClient/Policy/WritePolicy.cs | 11 +- AerospikeTest/Async/TestAsyncTxn.cs | 16 +-- AerospikeTest/Sync/Basic/TestTxn.cs | 56 +++++----- 26 files changed, 305 insertions(+), 160 deletions(-) diff --git a/AerospikeClient/Async/AsyncTxnAddKeys.cs b/AerospikeClient/Async/AsyncTxnAddKeys.cs index 3c472cc1..d797db74 100644 --- a/AerospikeClient/Async/AsyncTxnAddKeys.cs +++ b/AerospikeClient/Async/AsyncTxnAddKeys.cs @@ -70,11 +70,6 @@ protected internal override bool PrepareRetry(bool timeout) return true; } - protected internal override void OnInDoubt() - { - policy.Txn.SetMonitorInDoubt(); - } - protected internal override void 
OnSuccess() { if (listener != null) diff --git a/AerospikeClient/Async/AsyncTxnRoll.cs b/AerospikeClient/Async/AsyncTxnRoll.cs index 053d7680..f1474e00 100644 --- a/AerospikeClient/Async/AsyncTxnRoll.cs +++ b/AerospikeClient/Async/AsyncTxnRoll.cs @@ -194,7 +194,7 @@ private void Roll(BatchRecordArrayListener rollListener, int txnAttr) private void CloseOnCommit(bool verified) { - if (!txn.MonitorMightExist()) + if (!txn.CloseMonitor()) { if (verified) { @@ -227,7 +227,7 @@ private void CloseOnCommit(bool verified) private void CloseOnAbort() { - if (!txn.MonitorMightExist()) + if (!txn.CloseMonitor()) { // There is no MRT monitor record to remove. NotifyAbortSuccess(AbortStatusType.OK); diff --git a/AerospikeClient/Command/TxnAddKeys.cs b/AerospikeClient/Command/TxnAddKeys.cs index ea38caa7..fafe93d4 100644 --- a/AerospikeClient/Command/TxnAddKeys.cs +++ b/AerospikeClient/Command/TxnAddKeys.cs @@ -44,10 +44,5 @@ protected internal override void ParseResult(Connection conn) throw new AerospikeException(resultCode); } - - protected internal override void OnInDoubt() - { - policy.Txn.SetMonitorInDoubt(); - } } } diff --git a/AerospikeClient/Command/TxnRoll.cs b/AerospikeClient/Command/TxnRoll.cs index d5b3e48f..9b96d1a9 100644 --- a/AerospikeClient/Command/TxnRoll.cs +++ b/AerospikeClient/Command/TxnRoll.cs @@ -58,7 +58,7 @@ public void Verify(BatchPolicy verifyPolicy, BatchPolicy rollPolicy) throw CreateCommitException(CommitErrorType.VERIFY_FAIL_ABORT_ABANDONED, e, e2); } - if (txn.MonitorMightExist()) + if (txn.CloseMonitor()) { try { @@ -142,7 +142,7 @@ public CommitStatusType Commit(BatchPolicy rollPolicy) } } - if (txn.MonitorMightExist()) + if (txn.CloseMonitor()) { // Remove MRT monitor. 
try @@ -206,7 +206,7 @@ public AbortStatusType Abort(BatchPolicy rollPolicy) return AbortStatusType.ROLL_BACK_ABANDONED; } - if (txn.MonitorMightExist()) + if (txn.CloseMonitor()) { try { diff --git a/AerospikeClient/Main/AerospikeClient.cs b/AerospikeClient/Main/AerospikeClient.cs index ad09ad8d..cbf897da 100644 --- a/AerospikeClient/Main/AerospikeClient.cs +++ b/AerospikeClient/Main/AerospikeClient.cs @@ -256,125 +256,137 @@ protected internal AerospikeClient(ClientPolicy policy) /// /// Default read policy that is used when read command policy is null. - /// Get returns a copy of the read policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// public Policy ReadPolicyDefault { - get { return new Policy(readPolicyDefault); } + get { return readPolicyDefault; } set { readPolicyDefault = value; } } /// /// Default write policy that is used when write command policy is null. - /// Get returns a copy of the write policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// public WritePolicy WritePolicyDefault { - get { return new WritePolicy(writePolicyDefault); } + get { return writePolicyDefault; } set { writePolicyDefault = value; } } /// /// Default scan policy that is used when scan command policy is null. - /// Get returns a copy of the scan policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// public ScanPolicy ScanPolicyDefault { - get { return new ScanPolicy(scanPolicyDefault); } + get { return scanPolicyDefault; } set { scanPolicyDefault = value; } } /// /// Default query policy that is used when query command policy is null. 
- /// Get returns a copy of the query policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// public QueryPolicy QueryPolicyDefault { - get { return new QueryPolicy(queryPolicyDefault); } + get { return queryPolicyDefault; } set { queryPolicyDefault = value; } } /// /// Default parent policy used in batch read commands. Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... - /// Get returns a copy of the batch header read policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// public BatchPolicy BatchPolicyDefault { - get { return new BatchPolicy(batchPolicyDefault); } + get { return batchPolicyDefault; } set { batchPolicyDefault = value; } } /// /// Default parent policy used in batch write commands. Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... - /// Get returns a copy of the batch header write policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// public BatchPolicy BatchParentPolicyWriteDefault { - get { return new BatchPolicy(batchParentPolicyWriteDefault); } + get { return batchParentPolicyWriteDefault; } set { batchParentPolicyWriteDefault = value; } } /// /// Default write policy used in batch operate commands. /// Write policy fields include generation, expiration, durableDelete, etc... - /// Get returns a copy of the batch detail write policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. 
/// public BatchWritePolicy BatchWritePolicyDefault { - get { return new BatchWritePolicy(batchWritePolicyDefault); } + get { return batchWritePolicyDefault; } set { batchWritePolicyDefault = value; } } /// /// Default delete policy used in batch delete commands. - /// Get returns a copy of the batch detail delete policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// public BatchDeletePolicy BatchDeletePolicyDefault { - get { return new BatchDeletePolicy(batchDeletePolicyDefault); } + get { return batchDeletePolicyDefault; } set { batchDeletePolicyDefault = value; } } /// /// Default user defined function policy used in batch UDF excecute commands. - /// Get returns a copy of the batch detail UDF policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command.er modified. /// public BatchUDFPolicy BatchUDFPolicyDefault { - get { return new BatchUDFPolicy(batchUDFPolicyDefault); } + get { return batchUDFPolicyDefault; } set { batchUDFPolicyDefault = value; } } /// /// Default info policy that is used when info command policy is null. - /// Get returns a copy of the info command policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// public InfoPolicy InfoPolicyDefault { - get { return new InfoPolicy(infoPolicyDefault); } + get { return infoPolicyDefault; } set { infoPolicyDefault = value; } } /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. - /// Get returns a copy of the txn verify policy default. + /// Use when the policy will not be modified. 
Use + /// when the policy will be modified for use in a specific command. /// public TxnVerifyPolicy TxnVerifyPolicyDefault { - get { return new TxnVerifyPolicy(txnVerifyPolicyDefault); } + get { return txnVerifyPolicyDefault; } set { txnVerifyPolicyDefault = value; } } /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) /// or back(abort) in a batch. - /// Get returns a copy of the txn roll policy default. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// public TxnRollPolicy TxnRollPolicyDefault { - get { return new TxnRollPolicy(txnRollPolicyDefault); } + get { return txnRollPolicyDefault; } set { txnRollPolicyDefault = value; } } diff --git a/AerospikeClient/Main/AerospikeException.cs b/AerospikeClient/Main/AerospikeException.cs index ed73da91..0e883c55 100644 --- a/AerospikeClient/Main/AerospikeException.cs +++ b/AerospikeClient/Main/AerospikeException.cs @@ -576,7 +576,7 @@ public Backoff(int resultCode) : base(resultCode) } /// - /// Exception thrown when fails. + /// Exception thrown when a multi-record transaction commit fails. /// Commit Exception has similar behavior to AggregateException. /// might be populated if mutliple exceptions contribute to the failure. /// diff --git a/AerospikeClient/Main/IAerospikeClient.cs b/AerospikeClient/Main/IAerospikeClient.cs index 5a825919..210dab9d 100644 --- a/AerospikeClient/Main/IAerospikeClient.cs +++ b/AerospikeClient/Main/IAerospikeClient.cs @@ -26,77 +26,89 @@ public interface IAerospikeClient /// /// Default read policy that is used when read command policy is null. - /// Get returns a copy of the read policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. 
/// Policy ReadPolicyDefault { get; set; } /// /// Default write policy that is used when write command policy is null. - /// Get returns a copy of the write policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// WritePolicy WritePolicyDefault { get; set; } /// /// Default scan policy that is used when scan command policy is null. - /// Get returns a copy of the scan policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// ScanPolicy ScanPolicyDefault { get; set; } /// /// Default query policy that is used when query command policy is null. - /// Get returns a copy of the query policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// QueryPolicy QueryPolicyDefault { get; set; } /// /// Default parent policy used in batch read commands.Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... - /// Get returns a copy of the batch header read policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// BatchPolicy BatchPolicyDefault { get; set; } /// /// Default parent policy used in batch write commands. Parent policy fields /// include socketTimeout, totalTimeout, maxRetries, etc... - /// Get returns a copy of the batch header write policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. 
/// BatchPolicy BatchParentPolicyWriteDefault { get; set; } /// /// Default write policy used in batch operate commands. /// Write policy fields include generation, expiration, durableDelete, etc... - /// Get returns a copy of the batch detail write policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// BatchWritePolicy BatchWritePolicyDefault { get; set; } /// /// Default delete policy used in batch delete commands. - /// Get returns a copy of the batch detail delete policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// BatchDeletePolicy BatchDeletePolicyDefault { get; set; } /// /// Default user defined function policy used in batch UDF execute commands. - /// Get returns a copy of the batch detail UDF policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// BatchUDFPolicy BatchUDFPolicyDefault { get; set; } /// /// Default info policy that is used when info command policy is null. - /// Get returns a copy of the info command policy default to avoid problems if this shared instance is later modified. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// InfoPolicy InfoPolicyDefault { get; set; } /// /// Default multi-record transactions (MRT) policy when verifying record versions in a batch on a commit. - /// Get returns a copy of the txn verify policy default. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. 
/// TxnVerifyPolicy TxnVerifyPolicyDefault { get; set; } /// /// Default multi-record transactions (MRT) policy when rolling the transaction records forward (commit) /// or back(abort) in a batch. - /// Get returns a copy of the txn roll policy default. + /// Use when the policy will not be modified. Use + /// when the policy will be modified for use in a specific command. /// TxnRollPolicy TxnRollPolicyDefault { get; set; } diff --git a/AerospikeClient/Main/ResultCode.cs b/AerospikeClient/Main/ResultCode.cs index d66ab3cf..9e701fb6 100644 --- a/AerospikeClient/Main/ResultCode.cs +++ b/AerospikeClient/Main/ResultCode.cs @@ -14,6 +14,8 @@ * License for the specific language governing permissions and limitations under * the License. */ +using System.Transactions; + namespace Aerospike.Client { /// @@ -282,49 +284,12 @@ public sealed class ResultCode /// public const int LOST_CONFLICT = 28; - /// - /// MRT record blocked by a different transaction. - /// Value: 29 - /// - public const int MRT_BLOCKED = 29; - - /// - /// MRT read version mismatch identified during commit. - /// Some other command changed the record outside of the transaction. - /// Value: 30 - /// - public const int MRT_VERSION_MISMATCH = 30; - - /// - /// MRT deadline reached without a successful commit or abort. - /// Value: 31 - /// - public const int MRT_EXPIRED = 31; - /// /// Write can't complete until XDR finishes shipping. /// Value: 32 /// public const int XDR_KEY_BUSY = 32; - /// - /// MRT was already committed. - /// Value: 33 - /// - public const int MRT_COMMITTED = 33; - - /// - /// MRT was already aborted. - /// Value: 34 - /// - public const int MRT_ABORTED = 34; - - /// - /// MRT write command limit (4096) exceeded. - /// Value: 35 - /// - public const int MRT_TOO_MANY_WRITES = 35; - /// /// There are no more records left for query. 
/// Value: 50 @@ -475,6 +440,43 @@ public sealed class ResultCode /// public const int UDF_BAD_RESPONSE = 100; + /// + /// MRT record blocked by a different transaction. + /// Value: 120 + /// + public const int MRT_BLOCKED = 120; + + /// + /// MRT read version mismatch identified during commit. + /// Some other command changed the record outside of the transaction. + /// Value: 121 + /// + public const int MRT_VERSION_MISMATCH = 121; + + /// + /// MRT deadline reached without a successful commit or abort. + /// Value: 122 + /// + public const int MRT_EXPIRED = 122; + + /// + /// MRT write command limit (4096) exceeded. + /// Value: 123 + /// + public const int MRT_TOO_MANY_WRITES = 123; + + /// + /// MRT was already committed. + /// Value: 124 + /// + public const int MRT_COMMITTED = 124; + + /// + /// MRT was already aborted. + /// Value: 125 + /// + public const int MRT_ABORTED = 125; + /// /// Batch functionality has been disabled. /// Value: 150 @@ -720,27 +722,9 @@ public static string GetResultString(int resultCode) case LOST_CONFLICT: return "Command failed due to conflict with XDR"; - case MRT_BLOCKED: - return "MRT record blocked by a different transaction"; - - case MRT_VERSION_MISMATCH: - return "MRT version mismatch"; - - case MRT_EXPIRED: - return "MRT expired"; - case XDR_KEY_BUSY: return "Write can't complete until XDR finishes shipping."; - case MRT_COMMITTED: - return "MRT already committed"; - - case MRT_ABORTED: - return "MRT already aborted"; - - case MRT_TOO_MANY_WRITES: - return "MRT write command limit exceeded"; - case QUERY_END: return "Query end"; @@ -816,6 +800,24 @@ public static string GetResultString(int resultCode) case UDF_BAD_RESPONSE: return "UDF returned error"; + case MRT_BLOCKED: + return "MRT record blocked by a different transaction"; + + case MRT_VERSION_MISMATCH: + return "MRT version mismatch"; + + case MRT_EXPIRED: + return "MRT expired"; + + case MRT_TOO_MANY_WRITES: + return "MRT write command limit exceeded"; + + case 
MRT_COMMITTED: + return "MRT already committed"; + + case MRT_ABORTED: + return "MRT already aborted"; + case BATCH_DISABLED: return "Batch functionality has been disabled"; diff --git a/AerospikeClient/Main/Txn.cs b/AerospikeClient/Main/Txn.cs index e01d0b76..c2faea39 100644 --- a/AerospikeClient/Main/Txn.cs +++ b/AerospikeClient/Main/Txn.cs @@ -63,7 +63,7 @@ public enum TxnState /// public int Timeout { get; set; } - private bool monitorInDoubt; + private bool writeInDoubt; public bool InDoubt { get; internal set; } @@ -71,7 +71,7 @@ public enum TxnState /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with /// default capacities. /// - /// The default client MRT timeout is zero.This means use the server configuration mrt-duration + /// The default client MRT timeout is zero. This means use the server configuration mrt-duration /// as the MRT timeout. The default mrt-duration is 10 seconds. /// ///
    @@ -87,7 +87,7 @@ public Txn() /// Create MRT, assign random transaction id and initialize reads/writes hashmaps with /// given capacities. /// - /// The default client MRT timeout is zero.This means use the server configuration mrt-duration + /// The default client MRT timeout is zero. This means use the server configuration mrt-duration /// as the MRT timeout. The default mrt-duration is 10 seconds. /// ///
    @@ -230,6 +230,7 @@ internal void OnWrite(Key key, long? version, int resultCode) ///
    internal void OnWriteInDoubt(Key key) { + writeInDoubt = true; Reads.Remove(key); Writes.Add(key); } @@ -275,19 +276,12 @@ internal void SetNamespace(List records) } /// - /// Set that the MRT monitor existence is in doubt. + /// Return if the MRT monitor record should be closed/deleted /// - internal void SetMonitorInDoubt() - { - this.monitorInDoubt = true; - } - - /// - /// Does MRT monitor record exist or is in doubt. - /// - public bool MonitorMightExist() + /// + internal bool CloseMonitor() { - return Deadline != 0 || monitorInDoubt; + return Deadline != 0 && !writeInDoubt; } /// diff --git a/AerospikeClient/Policy/AdminPolicy.cs b/AerospikeClient/Policy/AdminPolicy.cs index 8e455649..a66724f5 100644 --- a/AerospikeClient/Policy/AdminPolicy.cs +++ b/AerospikeClient/Policy/AdminPolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2019 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -41,5 +41,14 @@ public AdminPolicy(AdminPolicy other) public AdminPolicy() { } + + /// + /// Creates a deep copy of this admin policy. + /// + /// + public AdminPolicy Clone() + { + return new AdminPolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchDeletePolicy.cs b/AerospikeClient/Policy/BatchDeletePolicy.cs index d8d28978..c477eec6 100644 --- a/AerospikeClient/Policy/BatchDeletePolicy.cs +++ b/AerospikeClient/Policy/BatchDeletePolicy.cs @@ -101,5 +101,14 @@ public BatchDeletePolicy(BatchDeletePolicy other) public BatchDeletePolicy() { } + + /// + /// Creates a deep copy of this batch delete policy. 
+ /// + /// + public BatchDeletePolicy Clone() + { + return new BatchDeletePolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchPolicy.cs b/AerospikeClient/Policy/BatchPolicy.cs index 31860cdd..0ffe8270 100644 --- a/AerospikeClient/Policy/BatchPolicy.cs +++ b/AerospikeClient/Policy/BatchPolicy.cs @@ -189,6 +189,15 @@ public static BatchPolicy WriteDefault() policy.maxRetries = 0; return policy; } + + /// + /// Creates a deep copy of this batch policy. + /// + /// + public new BatchPolicy Clone() + { + return new BatchPolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchReadPolicy.cs b/AerospikeClient/Policy/BatchReadPolicy.cs index b6a5f9cf..3a9a9a52 100644 --- a/AerospikeClient/Policy/BatchReadPolicy.cs +++ b/AerospikeClient/Policy/BatchReadPolicy.cs @@ -93,5 +93,14 @@ public BatchReadPolicy(BatchReadPolicy other) public BatchReadPolicy() { } + + /// + /// Creates a deep copy of this batch read policy. + /// + /// + public BatchReadPolicy Clone() + { + return new BatchReadPolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchUDFPolicy.cs b/AerospikeClient/Policy/BatchUDFPolicy.cs index 600cfc13..b6cb9945 100644 --- a/AerospikeClient/Policy/BatchUDFPolicy.cs +++ b/AerospikeClient/Policy/BatchUDFPolicy.cs @@ -99,5 +99,14 @@ public BatchUDFPolicy(BatchUDFPolicy other) public BatchUDFPolicy() { } + + /// + /// Creates a deep copy of this batch UDF policy. + /// + /// + public BatchUDFPolicy Clone() + { + return new BatchUDFPolicy(this); + } } } diff --git a/AerospikeClient/Policy/BatchWritePolicy.cs b/AerospikeClient/Policy/BatchWritePolicy.cs index 78932fc5..28d3dbb8 100644 --- a/AerospikeClient/Policy/BatchWritePolicy.cs +++ b/AerospikeClient/Policy/BatchWritePolicy.cs @@ -127,5 +127,14 @@ public BatchWritePolicy(BatchWritePolicy other) public BatchWritePolicy() { } + + /// + /// Creates a deep copy of this batch write policy. 
+ /// + /// + public BatchWritePolicy Clone() + { + return new BatchWritePolicy(this); + } } } diff --git a/AerospikeClient/Policy/ClientPolicy.cs b/AerospikeClient/Policy/ClientPolicy.cs index 12ef85fe..a9d9af38 100644 --- a/AerospikeClient/Policy/ClientPolicy.cs +++ b/AerospikeClient/Policy/ClientPolicy.cs @@ -365,5 +365,14 @@ public ClientPolicy(ClientPolicy other) public ClientPolicy() { } + + /// + /// Creates a deep copy of this client policy. + /// + /// + public ClientPolicy Clone() + { + return new ClientPolicy(this); + } } } diff --git a/AerospikeClient/Policy/InfoPolicy.cs b/AerospikeClient/Policy/InfoPolicy.cs index a3852522..2e7c3a35 100644 --- a/AerospikeClient/Policy/InfoPolicy.cs +++ b/AerospikeClient/Policy/InfoPolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2019 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -50,5 +50,14 @@ public InfoPolicy() { timeout = 1000; } + + /// + /// Creates a deep copy of this info policy. + /// + /// + public InfoPolicy Clone() + { + return new InfoPolicy(this); + } } } diff --git a/AerospikeClient/Policy/Policy.cs b/AerospikeClient/Policy/Policy.cs index 68c11f0b..524f4756 100644 --- a/AerospikeClient/Policy/Policy.cs +++ b/AerospikeClient/Policy/Policy.cs @@ -334,6 +334,15 @@ public void SetTimeouts(int socketTimeout, int totalTimeout) this.socketTimeout = totalTimeout; } } + + /// + /// Creates a deep copy of this policy. + /// + /// + public Policy Clone() + { + return new Policy(this); + } } } #pragma warning restore 0618 diff --git a/AerospikeClient/Policy/QueryPolicy.cs b/AerospikeClient/Policy/QueryPolicy.cs index b5ce8331..dbf20500 100644 --- a/AerospikeClient/Policy/QueryPolicy.cs +++ b/AerospikeClient/Policy/QueryPolicy.cs @@ -146,6 +146,15 @@ public QueryPolicy() : base() base.totalTimeout = 0; base.maxRetries = 5; } + + /// + /// Creates a deep copy of this query policy. 
+ /// + /// + public new QueryPolicy Clone() + { + return new QueryPolicy(this); + } } } #pragma warning restore 0618 diff --git a/AerospikeClient/Policy/ScanPolicy.cs b/AerospikeClient/Policy/ScanPolicy.cs index fba0b129..7b912afb 100644 --- a/AerospikeClient/Policy/ScanPolicy.cs +++ b/AerospikeClient/Policy/ScanPolicy.cs @@ -123,6 +123,15 @@ public ScanPolicy() : base() base.totalTimeout = 0; base.maxRetries = 5; } + + /// + /// Creates a deep copy of this scan policy. + /// + /// + public new ScanPolicy Clone() + { + return new ScanPolicy(this); + } } } #pragma warning restore 0618 diff --git a/AerospikeClient/Policy/TlsPolicy.cs b/AerospikeClient/Policy/TlsPolicy.cs index d03e4831..1b64bece 100644 --- a/AerospikeClient/Policy/TlsPolicy.cs +++ b/AerospikeClient/Policy/TlsPolicy.cs @@ -151,5 +151,14 @@ private void ParseClientCertificateFile(string clientCertificateFile) clientCertificates = new X509CertificateCollection(); clientCertificates.Add(cert); } + + /// + /// Creates a deep copy of this TLS policy. + /// + /// + public TlsPolicy Clone() + { + return new TlsPolicy(this); + } } } diff --git a/AerospikeClient/Policy/TxnRollPolicy.cs b/AerospikeClient/Policy/TxnRollPolicy.cs index 026703ab..a05a00fe 100644 --- a/AerospikeClient/Policy/TxnRollPolicy.cs +++ b/AerospikeClient/Policy/TxnRollPolicy.cs @@ -42,5 +42,14 @@ public TxnRollPolicy() totalTimeout = 10000; sleepBetweenRetries = 1000; } + + /// + /// Creates a deep copy of this txn roll policy. + /// + /// + public new TxnRollPolicy Clone() + { + return new TxnRollPolicy(this); + } } } diff --git a/AerospikeClient/Policy/TxnVerifyPolicy.cs b/AerospikeClient/Policy/TxnVerifyPolicy.cs index 30bd16ed..495a793c 100644 --- a/AerospikeClient/Policy/TxnVerifyPolicy.cs +++ b/AerospikeClient/Policy/TxnVerifyPolicy.cs @@ -43,5 +43,14 @@ public TxnVerifyPolicy() totalTimeout = 10000; sleepBetweenRetries = 1000; } + + /// + /// Creates a deep copy of this txn verify policy. 
+ /// + /// + public new TxnVerifyPolicy Clone() + { + return new TxnVerifyPolicy(this); + } } } diff --git a/AerospikeClient/Policy/WritePolicy.cs b/AerospikeClient/Policy/WritePolicy.cs index ce18b23c..421f95cd 100644 --- a/AerospikeClient/Policy/WritePolicy.cs +++ b/AerospikeClient/Policy/WritePolicy.cs @@ -1,5 +1,5 @@ /* - * Copyright 2012-2019 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements. @@ -131,5 +131,14 @@ public WritePolicy() // Writes are not retried by default. base.maxRetries = 0; } + + /// + /// Creates a deep copy of this write policy. + /// + /// + public new WritePolicy Clone() + { + return new WritePolicy(this); + } } } diff --git a/AerospikeTest/Async/TestAsyncTxn.cs b/AerospikeTest/Async/TestAsyncTxn.cs index e1df7228..af397f37 100644 --- a/AerospikeTest/Async/TestAsyncTxn.cs +++ b/AerospikeTest/Async/TestAsyncTxn.cs @@ -653,7 +653,7 @@ public void Run(TestAsyncTxn parent, Listener listener) WritePolicy wp = null; if (txn != null) { - wp = client.WritePolicyDefault; + wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; } client.Put(wp, new PutHandler(listener, expectedResult), key, bins); @@ -718,7 +718,7 @@ public void Run(TestAsyncTxn parent, Listener listener) if (txn != null) { - p = client.ReadPolicyDefault; + p = client.ReadPolicyDefault.Clone(); p.Txn = txn; } client.Get(p, new GetExpectHandler(parent, listener, expect, generation), key); @@ -801,7 +801,7 @@ public void Run(TestAsyncTxn parent, Listener listener) if (txn != null) { - wp = client.WritePolicyDefault; + wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; } client.Operate(wp, new OperateExpectHandler(parent, listener, expect), key, ops); @@ -882,7 +882,7 @@ public void Run(TestAsyncTxn parent, Listener listener) if (txn != null) { - wp = client.WritePolicyDefault; + wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; } client.Execute(wp, new UDFHandler(listener), 
key, packageName, functionName, functionArgs); @@ -927,7 +927,7 @@ public void Run(TestAsyncTxn parent, Listener listener) BatchPolicy bp = null; if (txn != null) { - bp = client.BatchPolicyDefault; + bp = client.BatchPolicyDefault.Clone(); bp.Txn = txn; } client.Get(bp, new BatchGetExpectHandler(parent, listener, expected), keys); @@ -984,7 +984,7 @@ public void Run(TestAsyncTxn parent, Listener listener) if (txn != null) { - bp = client.BatchParentPolicyWriteDefault; + bp = client.BatchParentPolicyWriteDefault.Clone(); bp.Txn = txn; } client.Operate(bp, null, new BatchOperateHandler(listener), keys, ops); @@ -1050,7 +1050,7 @@ public void Run(TestAsyncTxn parent, Listener listener) WritePolicy wp = null; if (txn != null) { - wp = client.WritePolicyDefault; + wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; } client.Touch(wp, new TouchHandler(listener), key); @@ -1094,7 +1094,7 @@ public void Run(TestAsyncTxn parent, Listener listener) if (txn != null) { - wp = client.WritePolicyDefault; + wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; wp.durableDelete = true; } diff --git a/AerospikeTest/Sync/Basic/TestTxn.cs b/AerospikeTest/Sync/Basic/TestTxn.cs index 6a7c3f41..b5024183 100644 --- a/AerospikeTest/Sync/Basic/TestTxn.cs +++ b/AerospikeTest/Sync/Basic/TestTxn.cs @@ -43,7 +43,7 @@ public void TxnWrite() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val2")); @@ -60,7 +60,7 @@ public void TxnWriteTwice() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val1")); client.Put(wp, key, new Bin(binName, "val2")); @@ -79,8 +79,8 @@ public void TxnWriteConflict() Txn txn1 = new(); Txn txn2 = new(); - WritePolicy wp1 = client.WritePolicyDefault; - WritePolicy wp2 = client.WritePolicyDefault; + WritePolicy wp1 = 
client.WritePolicyDefault.Clone(); + WritePolicy wp2 = client.WritePolicyDefault.Clone(); wp1.Txn = txn1; wp2.Txn = txn2; @@ -115,7 +115,7 @@ public void TxnWriteBlock() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val2")); @@ -145,7 +145,7 @@ public void TxnWriteRead() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val2")); @@ -169,11 +169,11 @@ public void TxnWriteAbort() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val2")); - Policy p = client.ReadPolicyDefault; + Policy p = client.ReadPolicyDefault.Clone(); p.Txn = txn; Record record = client.Get(p, key); AssertBinEqual(key, record, binName, "val2"); @@ -194,7 +194,7 @@ public void TxnDelete() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; wp.durableDelete = true; client.Delete(wp, key); @@ -214,7 +214,7 @@ public void TxnDeleteAbort() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; wp.durableDelete = true; client.Delete(wp, key); @@ -234,7 +234,7 @@ public void TxnDeleteTwice() client.Put(null, key, new Bin(binName, "val1")); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; wp.durableDelete = true; client.Delete(wp, key); @@ -255,7 +255,7 @@ public void TxnTouch() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Touch(wp, key); @@ -274,7 +274,7 @@ public void TxnTouchAbort() Txn txn = new(); - WritePolicy wp = 
client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Touch(wp, key); @@ -293,7 +293,7 @@ public void TxnOperateWrite() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; Record record = client.Operate(wp, key, Operation.Put(new Bin(binName, "val2")), @@ -316,7 +316,7 @@ public void TxnOperateWriteAbort() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; Record record = client.Operate(wp, key, Operation.Put(new Bin(binName, "val2")), @@ -339,7 +339,7 @@ public void TxnUDF() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); @@ -358,7 +358,7 @@ public void TxnUDFAbort() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Execute(wp, key, "record_example", "writeBin", Value.Get(binName), Value.Get("val2")); @@ -485,11 +485,11 @@ public void TxnWriteCommitAbort() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val2")); - Policy p = client.ReadPolicyDefault; + Policy p = client.ReadPolicyDefault.Clone(); p.Txn = txn; Record record = client.Get(p, key); AssertBinEqual(key, record, binName, "val2"); @@ -512,12 +512,12 @@ public void TxnWriteReadTwoTxn() client.Put(null, key, new Bin(binName, "val1")); - var rp1 = client.ReadPolicyDefault; + var rp1 = client.ReadPolicyDefault.Clone(); rp1.Txn = txn1; var record = client.Get(rp1, key); AssertBinEqual(key, record, binName, "val1"); - var rp2 = client.ReadPolicyDefault; + var rp2 = client.ReadPolicyDefault.Clone(); rp2.Txn = txn2; record = 
client.Get(rp2, key); AssertBinEqual(key, record, binName, "val1"); @@ -542,11 +542,11 @@ public void TxnLUTCommit() // Test Case 38 client.Delete(null, key2); client.Delete(null, key3); - var wp = client.WritePolicyDefault; + var wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Put(wp, key1, new Bin(binName, "val1")); // T1 - var p = client.ReadPolicyDefault; + var p = client.ReadPolicyDefault.Clone(); p.Txn = txn; var record = client.Get(p, key1); // T2 Assert.AreEqual(1, record.generation); @@ -598,7 +598,7 @@ public void TxnLUTAbort() // Test Case 39 client.Put(null, key1, new Bin(binName, "val1")); // T1 - var p = client.ReadPolicyDefault; + var p = client.ReadPolicyDefault.Clone(); p.Txn = txn; var record = client.Get(p, key1); // T2 Assert.AreEqual(1, record.generation); @@ -608,7 +608,7 @@ public void TxnLUTAbort() // Test Case 39 record = client.Get(p, key2); // T4 Assert.AreEqual(1, record.generation); - var wp = client.WritePolicyDefault; + var wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Put(wp, key2, new Bin(binName, "val11")); // T5 @@ -646,7 +646,7 @@ record = client.Get(p, key1); { client.Commit(txn); // T10 } - catch (AerospikeException.Commit ae) + catch (AerospikeException.Commit) { } @@ -671,7 +671,7 @@ public void TxnWriteAfterCommit() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; client.Put(wp, key, new Bin(binName, "val1")); @@ -699,7 +699,7 @@ public void TxnInvalidNamespace() Txn txn = new(); - WritePolicy wp = client.WritePolicyDefault; + WritePolicy wp = client.WritePolicyDefault.Clone(); wp.Txn = txn; try From 8defe6cf6f7e8e7cc2b4d5fa8e65508e9ba91ee8 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Thu, 5 Dec 2024 09:10:43 -0700 Subject: [PATCH 37/41] Use 8.0 rc in pipeline --- .github/actions/run-ee-server/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/run-ee-server/action.yml 
b/.github/actions/run-ee-server/action.yml index b014669b..1ebe53f5 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -11,7 +11,7 @@ inputs: server-tag: required: true description: Specify Docker tag - default: '8.0.0.0-alpha5_1' + default: '8.0.0.0-rc1' # Github Composite Actions can't access secrets # so we need to pass them in as inputs docker-hub-username: From 4393af743d92fbb9beb60ea294336c6040644e88 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Thu, 5 Dec 2024 09:13:18 -0700 Subject: [PATCH 38/41] Use latest server version in pipeline --- .github/actions/run-ee-server/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml index 1ebe53f5..6bb20e67 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -5,13 +5,13 @@ description: 'Run EE server. Returns once server is ready. Only tested on Linux inputs: # All inputs in composite actions are strings use-server-rc: - required: true + required: false description: Deploy server release candidate? 
default: 'false' server-tag: required: true description: Specify Docker tag - default: '8.0.0.0-rc1' + default: 'latest' # Github Composite Actions can't access secrets # so we need to pass them in as inputs docker-hub-username: From 3a09953c41ce048349f5b7cfe084cd51c078a859 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Thu, 5 Dec 2024 13:29:13 -0700 Subject: [PATCH 39/41] Remove txn instance from copied write policy when adding MRT monitor keys --- .github/workflows/tests.yml | 2 +- AerospikeClient/Async/AsyncTxnAddKeys.cs | 8 ++++++-- AerospikeClient/Async/AsyncTxnMonitor.cs | 3 ++- AerospikeClient/Command/TxnAddKeys.cs | 6 ++++-- AerospikeClient/Command/TxnMonitor.cs | 3 ++- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 03d3209a..6a762fad 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -32,7 +32,7 @@ jobs: - uses: ./.github/actions/run-ee-server with: - use-server-rc: true + use-server-rc: false docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }} docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }} diff --git a/AerospikeClient/Async/AsyncTxnAddKeys.cs b/AerospikeClient/Async/AsyncTxnAddKeys.cs index d797db74..1f61021e 100644 --- a/AerospikeClient/Async/AsyncTxnAddKeys.cs +++ b/AerospikeClient/Async/AsyncTxnAddKeys.cs @@ -21,17 +21,20 @@ public sealed class AsyncTxnAddKeys : AsyncWriteBase { private readonly RecordListener listener; private readonly OperateArgs args; + private readonly Txn txn; public AsyncTxnAddKeys ( AsyncCluster cluster, RecordListener listener, Key key, - OperateArgs args + OperateArgs args, + Txn txn ) : base(cluster, args.writePolicy, key) { this.listener = listener; this.args = args; + this.txn = txn; } public AsyncTxnAddKeys(AsyncTxnAddKeys other) @@ -39,6 +42,7 @@ public AsyncTxnAddKeys(AsyncTxnAddKeys other) { this.listener = other.listener; this.args = other.args; + this.txn = other.txn; } protected internal 
override AsyncCommand CloneCommand() @@ -54,7 +58,7 @@ protected internal override void WriteBuffer() protected internal override bool ParseResult() { ParseHeader(); - ParseTxnDeadline(policy.Txn); + ParseTxnDeadline(txn); if (resultCode == ResultCode.OK) { diff --git a/AerospikeClient/Async/AsyncTxnMonitor.cs b/AerospikeClient/Async/AsyncTxnMonitor.cs index 566e9800..e5901cac 100644 --- a/AerospikeClient/Async/AsyncTxnMonitor.cs +++ b/AerospikeClient/Async/AsyncTxnMonitor.cs @@ -139,6 +139,7 @@ private AsyncTxnMonitor(AsyncCommand command, AsyncCluster cluster) void Execute(AsyncCluster cluster, Policy policy, Operation[] ops) { + Txn txn = policy.Txn; Key txnKey = TxnMonitor.GetTxnMonitorKey(policy.Txn); WritePolicy wp = TxnMonitor.CopyTimeoutPolicy(policy); @@ -146,7 +147,7 @@ void Execute(AsyncCluster cluster, Policy policy, Operation[] ops) // Add write key(s) to MRT monitor. OperateArgs args = new(wp, null, null, ops); - AsyncTxnAddKeys txnCommand = new(cluster, txnListener, txnKey, args); + AsyncTxnAddKeys txnCommand = new(cluster, txnListener, txnKey, args, txn); txnCommand.Execute(); } diff --git a/AerospikeClient/Command/TxnAddKeys.cs b/AerospikeClient/Command/TxnAddKeys.cs index fafe93d4..51d55c3f 100644 --- a/AerospikeClient/Command/TxnAddKeys.cs +++ b/AerospikeClient/Command/TxnAddKeys.cs @@ -20,11 +20,13 @@ namespace Aerospike.Client public sealed class TxnAddKeys : SyncWriteCommand { private readonly OperateArgs args; + private readonly Txn txn; - public TxnAddKeys (Cluster cluster, Key key, OperateArgs args) + public TxnAddKeys (Cluster cluster, Key key, OperateArgs args, Txn txn) : base(cluster, args.writePolicy, key) { this.args = args; + this.txn = txn; } protected internal override void WriteBuffer() @@ -35,7 +37,7 @@ protected internal override void WriteBuffer() protected internal override void ParseResult(Connection conn) { ParseHeader(conn); - ParseTxnDeadline(policy.Txn); + ParseTxnDeadline(txn); if (resultCode == ResultCode.OK) { diff 
--git a/AerospikeClient/Command/TxnMonitor.cs b/AerospikeClient/Command/TxnMonitor.cs index ed6d164e..b0826e3b 100644 --- a/AerospikeClient/Command/TxnMonitor.cs +++ b/AerospikeClient/Command/TxnMonitor.cs @@ -133,10 +133,11 @@ private static Operation[] GetTxnOps(Txn txn, List list) private static void AddWriteKeys(Cluster cluster, Policy policy, Operation[] ops) { + Txn txn = policy.Txn; Key txnKey = GetTxnMonitorKey(policy.Txn); WritePolicy wp = CopyTimeoutPolicy(policy); OperateArgs args = new(wp, null, null, ops); - TxnAddKeys cmd = new(cluster, txnKey, args); + TxnAddKeys cmd = new(cluster, txnKey, args, txn); cmd.Execute(); } From eeeae7e060958967bd23720ba4732ab93e7802da Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Thu, 5 Dec 2024 14:20:02 -0700 Subject: [PATCH 40/41] Set pipeline to run on PRs and changes to stage and master --- .github/actions/run-ee-server/action.yml | 2 +- .github/workflows/tests.yml | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/actions/run-ee-server/action.yml b/.github/actions/run-ee-server/action.yml index 6bb20e67..e0790508 100644 --- a/.github/actions/run-ee-server/action.yml +++ b/.github/actions/run-ee-server/action.yml @@ -5,7 +5,7 @@ description: 'Run EE server. Returns once server is ready. Only tested on Linux inputs: # All inputs in composite actions are strings use-server-rc: - required: false + required: true description: Deploy server release candidate? default: 'false' server-tag: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6a762fad..97f8c607 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,12 +1,13 @@ name: Run tests # Trigger test workflow whenever: -# 1. Commits are pushed directly to the mrt branch +# 1. A pull request is updated (e.g with new commits) +# 2. 
Commits are pushed directly to the stage or master branch on: push: - branches: ["mrt"] + branches: ["stage*", "master*"] pull_request: - branches: ["mrt"] + branches: ["stage*", "master*"] types: [ # Default triggers opened, @@ -32,7 +33,7 @@ jobs: - uses: ./.github/actions/run-ee-server with: - use-server-rc: false + use-server-rc: use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }} docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }} docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }} From ff3146c95558a1786a2ddb69ff12c919de1d2759 Mon Sep 17 00:00:00 2001 From: Shannon Klaus Date: Thu, 5 Dec 2024 14:22:47 -0700 Subject: [PATCH 41/41] Fix syntax error in yml --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 97f8c607..9cfaf6c7 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -33,7 +33,7 @@ jobs: - uses: ./.github/actions/run-ee-server with: - use-server-rc: use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }} + use-server-rc: ${{ contains(github.event.pull_request.labels.*.name, 'new-server-features') }} docker-hub-username: ${{ secrets.DOCKER_HUB_BOT_USERNAME }} docker-hub-password: ${{ secrets.DOCKER_HUB_BOT_PW }}