config.json
// Example unload/copy utility config file. Please remove all '//' comment lines before use, as comments are not valid JSON.
{
// the source database from which we'll export data
"unloadSource": {
"clusterEndpoint": "my-cluster.d7bdmd4addft.eu-west-1.redshift.amazonaws.com",
"clusterPort": 5439,
// base 64 encoded password for the user to UNLOAD data as. Use the encryptValue.sh utility to generate this string
"connectPwd": "my base 64 encoded password",
"connectUser": "master",
"db": "mydb",
"schemaName": "public",
    //If defining more than one table name, it must be a JSON array, not a comma-separated list. The values must match tableNames in copyTarget.
"tableNames": [
"export_table1",
"export_table2"
],
    //Optional list of columns to appear in the SELECT statement, e.g. if you have auto-generated IDs, list the other columns here.
    //You can use any SQL or Redshift built-in functions that would be allowed in an UNLOAD statement.
"columns": "column1,column2",
    // Optional UNLOAD statement that overrides the default. Can be used to copy only a subset of the data.
"unloadStatement": "unload ('SELECT {columns} FROM {schema_name}.{table_name} where received_timestamp > (getdate() - 30)') to '{dataStagingPath}.' credentials '{s3_access_credentials};master_symmetric_key={master_symmetric_key}' manifest encrypted gzip null as 'NULL_STRING__' delimiter '^' addquotes escape allowoverwrite"
},
// location and credentials for S3, which are used to store migrated data while in flight
"s3Staging": {
    // Either use an AWS IAM role or specify an access key / secret key for S3 access. See http://docs.aws.amazon.com/redshift/latest/mgmt/copy-unload-iam-role.html
    // AWS IAM role to use. If a role is specified, the access keys are ignored
"aws_iam_role": "aws iam role which is assigned to Redshift and has access to the s3 bucket",
// base 64 encoded AWS Access Key used to access S3. Use the encryptValue.sh utility to generate this string
"aws_access_key_id": "my base 64 encoded access key",
// base 64 encoded AWS Secret Access Key used to access S3. Use the encryptValue.sh utility to generate this string
"aws_secret_access_key": "my base 64 encoded secret key",
// path on S3 to use for storing in-flight data. The current date and time is appended to the prefix
"path": "s3://my-bucket/prefix/",
"deleteOnSuccess": "True",
// region to use for the S3 export
"region": "us-east-1"
},
// the destination database into which we will copy data
"copyTarget": {
"clusterEndpoint": "my-other-cluster.d7bdmd4addft.eu-west-1.redshift.amazonaws.com",
"clusterPort": 5439,
// base 64 encoded password for the user to COPY data as. Use the encryptValue.sh utility to generate this string
"connectPwd": "my base 64 encoded password",
"connectUser": "master",
"db": "mydb",
"schemaName": "public",
    //If defining more than one table name, it must be a JSON array, not a comma-separated list. The values must match tableNames in unloadSource.
"tableNames": [
"export_table1",
"export_table2"
],
    //Optional list of columns. Best used in conjunction with unloadSource.columns, unless the schemas differ and you know what you are doing.
"columns": "column1,column2",
    //Optional boolean indicating whether the COPY command should use EXPLICIT_IDS:
// https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-data-conversion.html#copy-explicit-ids
"explicit_ids": true
}
}
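
Below is a minimal sketch, not part of the utility itself, showing one way to strip the '//' comment lines from this template and sanity-check the result before handing it to the unload/copy utility. The file name 'config.json', the load_config helper, and the checks are illustrative assumptions drawn from the comments in the template, not the utility's own validation logic.

import json

def load_config(path="config.json"):
    # Assumed helper: read the template and drop whole-line '//' comments,
    # which the template notes are not valid JSON.
    with open(path) as f:
        lines = f.read().splitlines()
    cleaned = "\n".join(
        line for line in lines if not line.lstrip().startswith("//")
    )
    return json.loads(cleaned)

config = load_config()

# The tableNames lists must match between unloadSource and copyTarget.
assert config["unloadSource"]["tableNames"] == config["copyTarget"]["tableNames"]

# Either an IAM role or an access key / secret key pair must be provided for S3.
s3 = config["s3Staging"]
assert "aws_iam_role" in s3 or (
    "aws_access_key_id" in s3 and "aws_secret_access_key" in s3
)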