# application.conf — Cromwell reference configuration (HOCON)
webservice {
port = 8000
interface = 0.0.0.0
instance.name = "reference"
}
akka {
loggers = ["akka.event.slf4j.Slf4jLogger"]
actor {
default-dispatcher {
fork-join-executor {
# Number of threads = min(parallelism-factor * cpus, parallelism-max)
# Below are the default values set by Akka, uncomment to tune these
#parallelism-factor = 3.0
#parallelism-max = 64
}
}
}
dispatchers {
# A dispatcher for actors performing blocking I/O operations.
# Prevents the whole system from being slowed down while waiting for responses from external resources (for instance, a database or a remote service).
io-dispatcher {
type = Dispatcher
executor = "fork-join-executor"
# Using the forkjoin defaults, this can be tuned if we wish
}
# A dispatcher for actors handling API operations
# Keeps the API responsive regardless of the load of workflows being run
api-dispatcher {
type = Dispatcher
executor = "fork-join-executor"
}
# A dispatcher for engine actors.
# Because backend behaviour is unpredictable (potentially blocking, slow), the engine runs
# on its own dispatcher to prevent backends from affecting its performance.
engine-dispatcher {
type = Dispatcher
executor = "fork-join-executor"
}
# A dispatcher used by supported backend actors
backend-dispatcher {
type = Dispatcher
executor = "fork-join-executor"
}
# Note that without further configuration, all other actors run on the default dispatcher
}
}
spray.can {
server {
request-timeout = 40s
}
client {
request-timeout = 40s
connecting-timeout = 40s
}
}
system {
// If 'true', a SIGINT will trigger Cromwell to attempt to abort all currently running jobs before exiting
abort-jobs-on-terminate = false
// Max number of retries per job that the engine will attempt in case of a retryable failure received from the backend
max-retries = 10
// If 'true' then when Cromwell starts up, it tries to restart incomplete workflows
workflow-restart = true
// Cromwell will cap the number of running workflows at N
max-concurrent-workflows = 5000
// Cromwell will launch up to N submitted workflows at a time, regardless of how many open workflow slots exist
max-workflow-launch-count = 50
// Number of seconds between workflow launches
new-workflow-poll-rate = 20
// Since the WorkflowLogCopyRouter is initialized in code, this is the number of workers
number-of-workflow-log-copy-workers = 10
}
workflow-options {
// These workflow options will be encrypted when stored in the database
encrypted-fields: []
// AES-256 key to use to encrypt the values in `encrypted-fields`
base64-encryption-key: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
// Directory in which to write per-workflow logs
workflow-log-dir: "cromwell-workflow-logs"
// When true, per workflow logs will be deleted after copying
workflow-log-temporary: true
// workflow-failure-mode determines what happens to other calls when a call fails. Can be either ContinueWhilePossible or NoNewCalls.
// Can also be overridden in workflow options. Defaults to NoNewCalls. Uncomment to change:
//workflow-failure-mode: "ContinueWhilePossible"
}
// Optional call-caching configuration.
call-caching {
enabled = false
invalidate-bad-cache-results = false
}
engine {
// This instructs the engine which filesystems are at its disposal to perform any IO operation that it might need.
// For instance, WDL variables declared at the Workflow level will be evaluated using the filesystems declared here.
// If you intend to be able to run workflows with this kind of declarations:
// workflow {
// String str = read_string("gs://bucket/my-file.txt")
// }
// You will need to provide the engine with a gcs filesystem
// Note that the default filesystem (local) is always available.
//filesystems {
// gcs {
// auth = "application-default"
// }
//}
}
backend {
default = "LSF"
providers {
Local {
actor-factory = "cromwell.backend.impl.sfs.config.ConfigBackendLifecycleActorFactory"
config {
run-in-background = true
runtime-attributes = "String? docker"
submit = "/bin/bash ${script}"
submit-docker = "docker run --rm -v ${cwd}:${docker_cwd} -i ${docker} /bin/bash < ${script}"
// Root directory where Cromwell writes job results. This directory must be
// visible and writeable by the Cromwell process as well as the jobs that Cromwell
// launches.
root: "cromwell-executions"
filesystems {
local {
// Cromwell makes a link to your input files within <root>/<workflow UUID>/workflow-inputs.
// The following are strategies used to make those links. They are ordered; if one fails,
// the next one is tried:
//
// hard-link: attempt to create a hard-link to the file
// copy: copy the file
// soft-link: create a symbolic link to the file
//
// NOTE: soft-link will be skipped for Docker jobs
localization: [
"hard-link", "soft-link", "copy"
]
}
}
}
}
LSF {
actor-factory = "cromwell.backend.impl.sfs.config.ConfigBackendLifecycleActorFactory"
config {
runtime-attributes = """
Int cpu = 1
String? memory_gb
String? queue
String? project
String? docker_image
String? resource
String? job_group
"""
submit = """
bsub \
-J ${job_name} \
-cwd ${cwd} \
-o ${out} \
-e ${err} \
${"-a \"docker(" + docker_image + ")\""} \
${"-P " + project} \
${"-q " + queue} \
${"-M " + memory_gb} \
${"-n " + cpu} \
${"-R \"" + resource + "\""} \
${"-g \"" + job_group + "\""} \
/bin/bash ${script}
"""
kill = "bkill ${job_id}"
check-alive = "bjobs -noheader -o \"stat\" ${job_id} | /bin/grep 'PEND\\|RUN'"
job-id-regex = "Job <(\\d+)>.*"
}
}
}
}
services {
KeyValue {
class = "cromwell.services.keyvalue.impl.SqlKeyValueServiceActor"
}
MetadataService {
class = "cromwell.services.metadata.impl.MetadataServiceActor"
}
}
//database {
// This specifies which database to use
// config = main.mysql
// main {
// mysql {
// driver = "slick.driver.MySQLDriver$"
// db {
// driver = "com.mysql.jdbc.Driver"
// url = "jdbc:mysql://localhost:3306/cromwell?socket=/tmp/mysqld.sock"
// user = "cromwell"
// password = "test4cromwell"
// connectionTimeout = 5000
// }
// }
// }
//}