# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#**********************************************************************************************************
#**********************************************************************************************************
#
# This .properties file is meant to be a fairly self-documenting configuration file for a Cassandra
# Data Migrator table migration. The document is divided into sections, with each section containing
# a brief description of the parameters within the section, as well as a description of each parameter.
#
# Following the description are the parameters themselves. Parameters that are required or are otherwise
# commonly configured have been commented in with some example values provided. Other parameters have
# been left commented out.
#
# Only those parameters that are commented in will be processed; any other parameters will either use a
# default value (specified in the parameter description) or will be ignored.
#
#**********************************************************************************************************
#**********************************************************************************************************
#===========================================================================================================
# Common connection parameters for Origin and Target. TLS (SSL) parameters are configured later.
# Connections are made with the Spark Cassandra Connector; conventions there likely work here
# See https://github.com/datastax/spark-cassandra-connector/blob/master/doc/reference.md
#
# spark.cdm.connect.origin
# spark.cdm.connect.target
# .host : Default is localhost. Hostname/IP address of the cluster; may be a comma-separated list,
# and can follow the <hostname>:<port> convention.
# .port : Default is 9042. Port number to use if not specified on .host
# .scb : Secure Connect Bundle, used to connect to Astra Database. Default is not set
# .username : Default is cassandra. Username (or client_id) used to authenticate
# .password : Default is cassandra. Password (or client_secret) used to authenticate
#
# You must set either .host or .scb.
#-----------------------------------------------------------------------------------------------------------
spark.cdm.connect.origin.host localhost
spark.cdm.connect.origin.port 9042
#spark.cdm.connect.origin.scb file:///aaa/bbb/secure-connect-enterprise.zip
spark.cdm.connect.origin.username cassandra
spark.cdm.connect.origin.password cassandra
spark.cdm.connect.target.host localhost
spark.cdm.connect.target.port 9042
#spark.cdm.connect.target.scb file:///aaa/bbb/secure-connect-enterprise.zip
spark.cdm.connect.target.username cassandra
spark.cdm.connect.target.password cassandra
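# Example (hypothetical addresses): per the .host description above, a multi-node cluster can be specified
# as a comma-separated list of <hostname>:<port> entries (shown here for origin).
#spark.cdm.connect.origin.host 10.0.0.1:9042,10.0.0.2:9042,10.0.0.3:9042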
#===========================================================================================================
# Details about the Origin Schema
#
# Required Parameters:
# spark.cdm.schema.origin
# .keyspaceTable : <keyspace>.<table_name> of the table to be migrated. Table must exist in Origin.
#
# Recommended Parameters:
# spark.cdm.schema.origin
# .column
# .ttl
# .automatic : Default is true, unless .ttl.names is specified. When true, the TTL of the
# target record will be determined by finding the maximum TTL of
# all origin columns that can have TTL set (which excludes partition key,
# clustering key, collections and UDTs). When false, and .names is not set, the target
# record will have the TTL determined by the target table configuration.
#
# *** Note spark.cdm.transform.custom.ttl overrides this setting ***
#
# .names : Default is empty, meaning the columns are determined automatically when .automatic
# is true (see above). Otherwise, specify a subset of eligible columns to be used
# to calculate the TTL of the target record.
# .writetime
# .automatic : Default is true, unless .writetime.names is specified. When true, the WRITETIME of
# the target record will be determined by finding the maximum WRITETIME of
# all origin columns that can have WRITETIME set (which excludes partition key,
# clustering key, collections and UDTs). When false, and .names is not set, the target
# record will have the WRITETIME determined by the target table configuration.
#
# *** Note spark.cdm.transform.custom.writetime overrides this setting ***
#
# .names : Default is empty, meaning the columns are determined automatically when .automatic
# is true (see above). Otherwise, specify a subset of eligible columns to be used
# to calculate the WRITETIME of the target record.
#
# Other Parameters:
# spark.cdm.schema
# .origin.column
# .names.to.target : Default is empty. If column names are changed between Origin and Target, then
# this map-like list provides a mechanism to associate the two. The format is
# origin_column_name:target_column_name. The list is comma-separated. Only renamed
# columns need to be listed.
#
# .ttlwritetime.calc
# .useCollections : Default is false. When true, TTL and WRITETIME max calculations will include
# collections and UDTs. This is useful when the only non-PK columns are collections
# and/or UDTs.
#-----------------------------------------------------------------------------------------------------------
spark.cdm.schema.origin.keyspaceTable keyspace_name.table_name
# Max TTL value of all non-PK columns will be used for insert on target
spark.cdm.schema.origin.column.ttl.automatic true
# Max TTL value of specified non-PK columns will be used for insert on target (overrides automatic setting)
#spark.cdm.schema.origin.column.ttl.names data_col1,data_col2,...
# Max WRITETIME value of all non-PK columns will be used for insert on target
spark.cdm.schema.origin.column.writetime.automatic true
# Max WRITETIME value of specified non-PK columns will be used for insert on target (overrides automatic setting)
#spark.cdm.schema.origin.column.writetime.names data_col1,data_col2,...
#spark.cdm.schema.origin.column.names.to.target partition_col1:partition_col_1,partition_col2:partition_col_2,...
spark.cdm.schema.ttlwritetime.calc.useCollections false
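# Example (hypothetical column names): rename two origin columns on the way to Target using the
# origin_column_name:target_column_name format described above; only renamed columns are listed.
#spark.cdm.schema.origin.column.names.to.target fname:first_name,lname:last_name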
#===========================================================================================================
# Details about the Target Schema
#
# Other Parameters:
# spark.cdm.schema.target
# .keyspaceTable : <keyspace>.<table_name> of the table to be migrated. Table must exist in Target.
# Default is origin.keyspaceTable.
#-----------------------------------------------------------------------------------------------------------
#spark.cdm.schema.target.keyspaceTable keyspace_name.table_name
#===========================================================================================================
# Autocorrection parameters allow CDM to correct data differences found between Origin and Target when
# running the DiffData program. Typically, these are left disabled, so a DiffData run generates a list of data
# discrepancies. The reasons for these discrepancies can then be investigated, and if necessary the
# parameters can be enabled.
#
# Recommended Parameters:
# spark.cdm.autocorrect
# .missing : Default is false. When true, data that is missing in Target but is found in
# Origin will be re-migrated.
# .mismatch : Default is false. When true, data that is different between Origin and Target
# will be reconciled. It is important to note that TIMESTAMP will have an effect
# here - if the WRITETIME of the Origin record (determined with
# .writetime.names) is earlier than the WRITETIME of the Target record, the
# change will not appear in Target. This may be particularly challenging to
# troubleshoot if individual columns (cells) have been modified in Target.
#
# Other Parameters:
# spark.cdm.autocorrect
# .missing.counter : Default is false. By default, Counter tables are not copied when missing,
# unless explicitly set. This might seem awkward (i.e. why not just use
# .missing?), but the purpose is to 'strongly encourage' the user to
# consider what is happening. Scenario: a Counter column in Target is at a value of
# 5323. The corresponding counter in Origin is also 5323. At some point, the Target
# counter gets DELETEd. Should the .missing record be re-inserted before
# the DELETE gets tombstoned, the counter will zombie back to life and
# become 5323+5323 = 10646.
#-----------------------------------------------------------------------------------------------------------
spark.cdm.autocorrect.missing false
spark.cdm.autocorrect.mismatch false
spark.cdm.autocorrect.missing.counter false
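# Example (illustrative): after reviewing the discrepancies reported by a DiffData run, a follow-up run
# could enable reconciliation as below.
#spark.cdm.autocorrect.missing true
#spark.cdm.autocorrect.mismatch true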
#===========================================================================================================
# Track a CDM run by token-ranges so that it can be stopped and resumed at a later time, including rerunning
# failed token-ranges. This feature is useful when the migration is expected to take a long time and the
# process could be killed (for any reason) or needs to be stopped and resumed.
# When a previousRunId is provided, the current CDM run will start from where the previous run left off.
#
# spark.cdm.trackRun : Default is false. When enabled, it will track each run using an entry (identified by
# `table_name` and a unique `run_id`) in the `cdm_run_info` table in the target keyspace.
# It will also keep details of each token-range in the `cdm_run_details` table.
# .previousRunId : Default is none (i.e. 0). When a non-zero value is provided, it will analyze the
# results of the previous run and only process the token-ranges that were not
# successful (`status != PASS`). A token-range may not be marked as 'PASS' for several
# reasons, including the job getting killed, some token-ranges failing due to load on
# the cluster (origin or target), or any other reason.
# .runId : Default is an auto-generated unique long value. When a non-zero value is provided,
# it will be used as a custom and unique identifier for the current run. The value
# of this id must be numeric, unique, and fit in a Java `long`. It can be used
# by wrapper scripts to pass a known `runId` and then use it to query the
# `cdm_run_info` and `cdm_run_details` tables.
#-----------------------------------------------------------------------------------------------------------
spark.cdm.trackRun false
spark.cdm.trackRun.previousRunId 0
spark.cdm.trackRun.runId <auto-generated-unique-long-value>
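# Example (hypothetical run id): resume a prior tracked run, reprocessing only token-ranges whose
# status was not PASS.
#spark.cdm.trackRun true
#spark.cdm.trackRun.previousRunId 1234567890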
#===========================================================================================================
# Performance and Operations Parameters affecting throughput and similar concerns.
#
# Recommended Parameters:
# spark.cdm.perfops
# .numParts : Default is 5000. In standard operation, the full token range (-2^63..2^63-1)
# is divided into a number of parts which will be parallel-processed. You should
# aim for each part to be ~10MB of data to migrate. The default assumes an
# average table size of ~50GB (10MB * 5K); if the table size is significantly
# less or more than that, adjust this value accordingly.
# .batchSize : Default is 5. When writing to Target, this is the number of records that
# will be put into an UNLOGGED batch. CDM will tend to work on the same partition
# at a time, so if your partition sizes are larger, this number may be increased.
# If .batchSize would mean that more than 1 partition is often contained in a batch,
# the figure should be reduced. Ideally < 1% of batches have more than 1 partition.
# For tables where the primary key equals the partition key OR the average row size
# is larger than 20 KB, always set this value to 1.
# .ratelimit
# .origin : Defaults to 20000. Concurrent number of operations across all parallel threads
# from Origin. This may be adjusted up (or down), depending on the amount of data
# and the processing capacity of the Origin cluster.
# .target : Defaults to 20000. Concurrent number of operations across all parallel threads
# from Target. This may be adjusted up (or down), depending on the amount of data
# and the processing capacity of the Target cluster.
#
# Other Parameters:
# spark.cdm.perfops
# .consistency : The .consistency parameters may be one of:
# ANY, ONE, TWO, THREE, QUORUM, LOCAL_ONE, EACH_QUORUM, LOCAL_QUORUM, SERIAL, LOCAL_SERIAL, ALL
# .read : Default is LOCAL_QUORUM. Read consistency from Origin, and also from Target
# when records are read for comparison purposes.
# .write : Default is LOCAL_QUORUM. Write consistency to Target.
# .printStatsAfter : Default is 100000. Number of rows of processing after which a progress log
# entry will be made.
# .printStatsPerPart : Default is false. Print statistics for each part after it is processed.
# .fetchSizeInRows : Default is 1000. This affects the frequency of reads from Origin, and also the
# frequency of flushes to Target. A larger value will reduce the number of reads
# and writes, but will increase the memory requirements.
#-----------------------------------------------------------------------------------------------------------
spark.cdm.perfops.numParts 5000
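# Example sizing (assumed ~500GB table): aiming for ~10MB per part gives 500GB / 10MB = 50000 parts.
#spark.cdm.perfops.numParts 50000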
spark.cdm.perfops.batchSize 5
spark.cdm.perfops.ratelimit.origin 20000
spark.cdm.perfops.ratelimit.target 20000
#spark.cdm.perfops.consistency.read LOCAL_QUORUM
#spark.cdm.perfops.consistency.write LOCAL_QUORUM
#spark.cdm.perfops.printStatsAfter 100000
#spark.cdm.perfops.printStatsPerPart false
#spark.cdm.perfops.fetchSizeInRows 1000
#spark.cdm.perfops.errorLimit 0
#===========================================================================================================
# Transformation Parameters
#
# spark.cdm.transform
# .missing.key.ts.replace.value : Timestamp value in milliseconds.
# Partition and clustering columns cannot have null values, but if such columns
# are added as part of a schema transformation between Origin and Target,
# it is possible that the Origin side is null. In this case, the
# Migrate data operation would fail. This parameter allows a crude
# constant value to be used in its place, separate from the Constant
# Values feature.
# .custom
# .writetime Default is 0 (disabled). Timestamp value in microseconds to use as the
# WRITETIME for the target record. This is useful when the WRITETIME of
# the record in Origin cannot be determined (such as when the only non-key
# columns are collections). This parameter allows a crude constant value
# to be used in its place, and overrides
# .schema.origin.column.writetime.names.
# .writetime.incrementBy Default is 0. This is useful when you have a List that is not frozen
# and are updating it via the autocorrect feature. Lists are not idempotent,
# and subsequent UPSERTs would add duplicates to the list. Future versions
# of CDM may tombstone the previous list, but for now this solution is
# viable and, crucially, more performant.
# .ttl Default is 0 (no expiry). Time-to-live value in seconds to use as the
# TTL for the target record. This is useful when the TTL of the record in
# Origin cannot be determined (such as when the only non-key columns are
# collections) OR if a new TTL needs to be set during migration. This
# parameter allows a crude constant value to be used in its place, and
# overrides .schema.origin.column.ttl.names
# .codecs Default is empty. A comma-separated list of additional codecs to
# enable. Current codecs are:
# INT_STRING : int stored in a String
# DOUBLE_STRING : double stored in a String
# BIGINT_STRING : bigint stored in a String
# DECIMAL_STRING : decimal stored in a String
# STRING_BLOB : TEXT stored in a Blob
# ASCII_BLOB : ASCII stored in a Blob
# TIMESTAMP_STRING_MILLIS : timestamp stored in a String,
# as Epoch milliseconds
# TIMESTAMP_STRING_FORMAT : timestamp stored in a String,
# with a custom format
#
# *** NOTE where there are multiple type pair options, such as with
# *** TIMESTAMP_STRING, only one can be configured at a time.
#
# .codecs.timestamp Configuration for CQL_TIMESTAMP_TO_STRING_FORMAT codec.
# .string.format Default is yyyyMMddHHmmss ; DateTimeFormatter.ofPattern(formatString)
# .string.zone Default is UTC ; Must be in ZoneRulesProvider.getAvailableZoneIds()
#
# .map.remove.null.value Default is false. Setting this to true will remove any entries in a Map
# field that have empty or null values. Such values can cause a NullPointerException
# if the value type does not support empty or null values (like Timestamp)
# and this property is false. Set it to true to handle such cases.
#-----------------------------------------------------------------------------------------------------------
#spark.cdm.transform.missing.key.ts.replace.value 1685577600000
#spark.cdm.transform.custom.writetime 0
#spark.cdm.transform.custom.writetime.incrementBy 0
#spark.cdm.transform.custom.ttl 0
#spark.cdm.transform.codecs
#spark.cdm.transform.codecs.timestamp.string.format yyyyMMddHHmmss
#spark.cdm.transform.codecs.timestamp.string.zone UTC
#spark.cdm.transform.map.remove.null.value false
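# Example (illustrative): enable the INT_STRING and TIMESTAMP_STRING_FORMAT codecs listed above; the
# format and zone values below are assumptions for illustration.
#spark.cdm.transform.codecs INT_STRING,TIMESTAMP_STRING_FORMAT
#spark.cdm.transform.codecs.timestamp.string.format yyyy-MM-dd HH:mm:ss
#spark.cdm.transform.codecs.timestamp.string.zone UTC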
#===========================================================================================================
# Cassandra Filters are applied on the coordinator node. Note that, depending on the filter, the coordinator
# node may need to do a lot more work than is normal.
#
# spark.cdm.filter.cassandra
# .partition
# .min : Default is 0 (when using RandomPartitioner) and -9223372036854775808 (-2^63)
# otherwise. Lower partition bound (inclusive).
# .max : Default is 2^127-1 (when using RandomPartitioner) and 9223372036854775807
# (2^63-1) otherwise. Upper partition bound (inclusive).
# .whereCondition : CQL added to the WHERE clause of SELECTs from origin
#-----------------------------------------------------------------------------------------------------------
#spark.cdm.filter.cassandra.partition.min -9223372036854775808
#spark.cdm.filter.cassandra.partition.max 9223372036854775807
#spark.cdm.filter.cassandra.whereCondition
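# Example (illustrative): restrict the migration to the lower half of the default (non-RandomPartitioner)
# token range.
#spark.cdm.filter.cassandra.partition.min -9223372036854775808
#spark.cdm.filter.cassandra.partition.max 0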
#===========================================================================================================
# Java Filters are applied on the client node. Data must be pulled from the origin cluster and then filtered,
# but this may have a lower impact on the production cluster than the Cassandra Filters. Java filters put
# load onto the Cassandra Data Migrator processing node by sending more data from Cassandra.
# Cassandra filters put load on the Cassandra nodes, notably because Cassandra Data Migrator specifies
# ALLOW FILTERING, which could cause the coordinator node to perform a lot more work.
#
# spark.cdm.filter.java
# .token.percent : Percent (between 1 and 100) of the token range in each Split that will be migrated.
# This is used to do a wide and random sampling of the data, and is applied to each
# split. Invalid percentages will be treated as 100.
#
# .writetime : Filter records based on their writetimes. The maximum writetime of the columns
# configured at .schema.origin.column.writetime.names will be compared to the
# thresholds, which must be in microseconds since the epoch. If the .writetime.names
# are not specified, or the thresholds are null or otherwise invalid, the filter will
# be ignored. Note that .perfops.batchSize will be ignored when this filter is in place;
# a value of 1 will be used instead.
# .min : Lowest (inclusive) writetime value to be migrated
# .max : Highest (inclusive) writetime value to be migrated
#
# .column : Filter rows based on matching a configured value.
# .name : Column name against which the .value is compared. Must be on the column list
# specified at .schema.origin.column.names. The column value will be converted to a
# String, trimmed of whitespace on both ends, and compared.
# .value : String value to use as comparison. Whitespace on the ends of .value will be trimmed.
#-----------------------------------------------------------------------------------------------------------
#spark.cdm.filter.java.token.percent 100
#spark.cdm.filter.java.writetime.min 0
#spark.cdm.filter.java.writetime.max 9223372036854775807
#spark.cdm.filter.java.column.name
#spark.cdm.filter.java.column.value
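# Example (illustrative): migrate only records whose max writetime is on or after 2023-01-01 00:00:00 UTC,
# expressed in microseconds since the epoch (1672531200 seconds * 1,000,000).
#spark.cdm.filter.java.writetime.min 1672531200000000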
#===========================================================================================================
# Constant Columns Feature allows you to add/remove/replace constant columns in the target table.
# If 'add' is used, .names, .types, and .values must be specified and have the same length.
# For 'remove', you don't need to use this feature; the target table simply should not have the column you
# plan to remove.
# 'replace' is a combination of the 'add' and 'remove' features, i.e. the origin column you plan to replace
# should not be present on the target, while the target column you plan to replace it with should be present.
#
# spark.cdm.feature.constantColumns
# .names comma-separated list of column names
# .types comma-separated list of column types (CQL types)
# .values list of hard-coded values, separated by .splitRegex (default comma); each value should be as
# you would use it on the CQLSH command line, e.g. 'abcd' for a string, 1234 for an int, etc.
# .splitRegex defaults to comma, but can be any regex that works with String.split(regex); this is needed
# because values of some types (e.g. lists, maps, sets) contain commas.
#-----------------------------------------------------------------------------------------------------------
#spark.cdm.feature.constantColumns.names const1,const2
#spark.cdm.feature.constantColumns.types
#spark.cdm.feature.constantColumns.values 'abcd',1234
#spark.cdm.feature.constantColumns.splitRegex ,
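# Example (illustrative, hypothetical columns; a sketch assuming .splitRegex applies to the .values list):
# a text constant plus a list constant. The list value contains commas, so the regex [|] (a literal pipe)
# is used to split .values.
#spark.cdm.feature.constantColumns.names const1,const_list
#spark.cdm.feature.constantColumns.types text,list<text>
#spark.cdm.feature.constantColumns.values 'abcd'|['a','b','c']
#spark.cdm.feature.constantColumns.splitRegex [|]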
#===========================================================================================================
# Explode Map Feature allows you to convert a source table Map into multiple target table records.
#
# spark.cdm.feature.explodeMap
# .origin.name Name of the map column; it must be on .schema.origin.column.names
# and the corresponding type on .schema.origin.column.types must be a map.
# .target.name.key Name of the column on the Target table that will hold the map key. This key
# must be present in the Target primary key .schema.target.column.id.names.
# .target.name.value Name of the column on the Target table that will hold the map value.
#-----------------------------------------------------------------------------------------------------------
#spark.cdm.feature.explodeMap.origin.name my_map
#spark.cdm.feature.explodeMap.target.name.key my_map_key
#spark.cdm.feature.explodeMap.target.name.value my_map_value
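# Example (hypothetical schemas): if the origin table has a column my_map map<text,int>, the target table
# would instead have my_map_key text and my_map_value int, with my_map_key added to the target primary key;
# each origin row then explodes into one target row per map entry.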
#===========================================================================================================
# The Extract JSON Feature enables extraction of a JSON value from a column containing JSON content in an
# Origin table and maps it to a specified column in the Target table.
#
# spark.cdm.feature.extractJson
# .exclusive Default is false. When set to true, only the columns specified below from the Origin
# and Target tables will be used, while all others will be ignored during Migration and
# Validation jobs.
#
# .originColumn Specifies the name of the Origin column that contains JSON content. This column must
# be of CQL type 'text'.
#
# .propertyMapping Defines the mapping between a JSON property in the Origin column and the corresponding
# Target column. The format should be `origin_json_propertyname:target_columnname`.
# - `origin_json_propertyname`: The name of the JSON property within the Origin column.
# - `target_columnname`: The name of the Target column where the extracted JSON value
# will be stored. The Target column can be of any primitive JSON type (String, Number,
# or Boolean) and must match the data type of the JSON value being extracted.
# - If the specified JSON property does not exist in the JSON content, the Target column
# will be set to null.
# Note: This feature currently supports extraction of only one JSON property.
#
# .overwrite Default is false. This property only applies to Validation runs (N/A for Migration).
# When set to true, the extracted JSON value will overwrite any existing value in the
# Target column during Validation. When false, validation will be skipped if the Target
# column has any non-null value.
#-----------------------------------------------------------------------------------------------------------
#spark.cdm.feature.extractJson.exclusive false
#spark.cdm.feature.extractJson.originColumn origin_columnname_with_json_content
#spark.cdm.feature.extractJson.propertyMapping origin_json_propertyname:target_columnname
#spark.cdm.feature.extractJson.overwrite false
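# Example (hypothetical data and column names): if the Origin JSON column holds {"age": 30, "city": "Paris"},
# the mapping below would store 30 in the Target column customer_age.
#spark.cdm.feature.extractJson.propertyMapping age:customer_age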
#===========================================================================================================
# Guardrail feature manages records that exceed guardrail checks. The Guardrail job will generate a
# report; other jobs will skip records that exceed the guardrail.
#
# spark.cdm.feature.guardrail
# .colSizeInKB Default 0, meaning the check is not done. Records with one or more fields that
# exceed this size will be flagged. Note this is kB (base 10), not kiB (base 2).
#
#-----------------------------------------------------------------------------------------------------------
#spark.cdm.feature.guardrail.colSizeInKB 1000
#===========================================================================================================
# TLS (SSL) connection parameters, if so configured. Note that Secure Bundles embed these details.
# See https://github.com/datastax/spark-cassandra-connector/blob/master/doc/reference.md
#
# spark.cdm.connect.origin.tls
# spark.cdm.connect.target.tls
# .enabled : Default is false. Set to true if TLS is used.
# .trustStore
# .path : Filepath to the Java truststore file
# .type : Default is JKS
# .password : Password needed to open the truststore
# .keyStore
# .path : Filepath to the Java keystore file
# .password : Password needed to open the keystore
# .enabledAlgorithms : Default is TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
# .isAstra : Default is false. Set to true if connecting to DataStax Astra DB without SCB
#-----------------------------------------------------------------------------------------------------------
#spark.cdm.connect.origin.tls.enabled false
#spark.cdm.connect.origin.tls.trustStore.path
#spark.cdm.connect.origin.tls.trustStore.password
#spark.cdm.connect.origin.tls.trustStore.type JKS
#spark.cdm.connect.origin.tls.keyStore.path
#spark.cdm.connect.origin.tls.keyStore.password
#spark.cdm.connect.origin.tls.enabledAlgorithms TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
#spark.cdm.connect.origin.tls.isAstra false
#spark.cdm.connect.target.tls.enabled false
#spark.cdm.connect.target.tls.trustStore.path
#spark.cdm.connect.target.tls.trustStore.password
#spark.cdm.connect.target.tls.trustStore.type JKS
#spark.cdm.connect.target.tls.keyStore.path
#spark.cdm.connect.target.tls.keyStore.password
#spark.cdm.connect.target.tls.enabledAlgorithms TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
#spark.cdm.connect.target.tls.isAstra false