diff --git a/.gitignore b/.gitignore index a189507116..eaa79556a0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,15 @@ **/node_modules **/npm-debug.log ui/dist +.gradle +.project +bin +build +client/python/conductor.egg-info +*.pyc +.classpath +docs/site +site +ui/.settings +.settings +dump.rdb diff --git a/.travis.yml b/.travis.yml index f639390d51..2fbef631e0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,10 @@ jdk: - oraclejdk8 install: true script: "./buildViaTravis.sh" +addons: + apt: + packages: + - redis-server cache: directories: - "$HOME/.gradle/caches" @@ -13,4 +17,4 @@ env: - secure: brK+NGTogesfjqwHso/dK5wqO2LxEUo9gEtUZADr9UlFDzcIU6JomGjeZzeaCsOBlFbZ4p0IIRfivGCi7yegQPWad3cAlyoAQ3+0b+ZxiumbJu5SVVr32x5NxfHNvnW9zIqFIOA9A6GjNq2AkdfBrr9bAerPYc8RjbU4PkH/+CM4HDrx2m/6eXrEbtElCi7IfdRLH9wu6D9/2ANdpK7bCjY2S9sMBvDUsUzGmcoUnJBdInjPYxL1tmAQlAMgWW8E/vKVdyjKq6JsYpwuVnztHlMryrXVQglwrbXtB0gl4Qvqdv0kXAlTf76wQsViEOIvoJV63o/cnFG2lZbVAJ+JGE7cCRaQpIzDf0il5XDkF86XQjaqWpfeEQu7CNj2yjXItn/2q2HaMu4uoEQwQSifRo0n44S7WOSkrZcYly4/Hax9SjiVvDDimlVqp0fURNpo41SMtlW0jXWIYEstft+0vWtPpwzHd9mWEqCQiXkDoAYpjPpfQFpcwFLAi+JO+4Y1Yhuw8NBMHTIDOzjbEwRo06yO/9pYICmg34a1mVAOTdAhpXR8HfTbFlTd80Xm3kYLmrOZrj5yWvP1+XgLDnMFEqw3nHYHQYGWKaVWs5OfYlkhWrpinMVdciLJEp20fUudiSfO7zcbjOXbN5Gg7E0X9kdIbKG9/h+m9buHJuD8QPA= - secure: W03DuzGYB2tpW6cJJgeFwG7urNPxSbNrrDk3kOApDb216woJ9BVSBGF1Jhhje6o7yYK9k2C2z02ulMNRnfkZ4Zt5WrsiD5zljXKM9G5BOy4zMVesEj93hRq99pfMiidH4pd6N1SZpFCeybxHIIuGHl43lCeDlgxxvpavsnoRwwDLGeRdiMlSB2uChAa9j0CmPr28cYB1r1iXpQPyOjgApI7TzRB42+j+pR0GmZWdCUbKpUPeyg13jQ3d4udgRSPG7b2jUTdrSiVkOD71d/25tmLNWygt2O+mUfp6cKDxZrYpD+V6MFIxHd5AWg9Z3KY/QBUizPKAvpKNDQ8pVj6yqsePYShl4IpTUhzbeFkATvSNXZyzSlmHXkAwkO7Gb8yOOvFqbH8cSqfXqNtjBIoP8WnA4caeY1ZCQ8ec0IpIc3nqng/lTk89hJ+vlmmj1h2G0Yh7syaNxNd7+yno5BXoLXlobACPMUYeHifEjtzcngM9i91m9yFviv6n6WGTnbSz4QTB0Pr5IEzIrOAudBPS4MijR/PmsgEa5l1tiCSWiTf2VJwMcB7g5tAzZqGX/wp4C6A/gbfPUutZBbeVnFCzGP5f9R0QtOOZm07cmN1IoO9+uBvPI62K3TQefgIF0/XKfiRhGKnhwdgZl5RZwN0WkAPVEjoWYXY4QSAZg99H88E= - secure: VAHbP/8nTAIl2UuT++C/BfSfBDxJPZOEgbCQcCyUpHsFa8SdstuB5Le6VZYaAzcs7wR9WFIHP6+llJyg76p1OhxHC/iG+5QFSqKSkA+RkPyBAmtNTw+Pt5i/0MMxNbBrlogPvFoGe9/wighYQKNwK3In431PSh5n4sEiXPc4XVSzaP+Qxpd1g4VQwQV950JTx97QMLwnR1RNNz/LhBaisE7XdTM561znmqhcRmfGZY7dlhdZxMp+60ngutIZUfSekFLY2nYecoWZv6kEMBxEMnnGBYamCUy856TIVgzGAtD5VScSiRxkwawBKN1OsgvEfwxg/duCTZ9GkQ1LFwxjNDX7bVUo3DsjlqteyJ8n1bh3oYlKgFN6XRiC/Tz8fh66N94AFM8+dc9aJFyBlPBPW1MxxjS+4Y9l3cHxTvyoixguKSHdOypT2PdWkWWSIPGE6j6S33sUJyJuuA/Eq4pG4bd1OfXcjdw+/UJlkLsb3p+ojPhlFqDtRlFTLeS2Mz565EOs/jTzUjuQFNrz3f4Ht+1JpWq9To8KjHzRelRxWR183cikTD9SCDRTQlBlMXcMJHXAasssU5BFr6ZprulKI4UNiU0b3CCVlofDiL/Zd/788TDyqCX/pqI/YmK62zP/EWxOZTCdbfbYetu/+b4c5z//ygfLbw2j3bmtB8ojnE0= - - secure: jiEHSPnbGaejrl6I9Aj4ZOmunzwBtLtnYLggB6W2KBVj115QLRTr2E/SkXrHINWLksV98oPs8J6E6v/LSJ7YwMQssyPmO2UjhakFNZCZpUIYeo+l8vP9LKRZhTbhav9dOG80RUIXUzqJl48GjaFrChYzdzNSXEwBhVqS/cPbEkfxZ+bPnPsuUseLjd/pFbn09CJduqhUWqv9OzjVa0cTjnVGIBDoqWp69p5M2Q8Kpf4wMsZ/gn1oww20YE/XpDrxo1bZyNLbPwsqRSK5lnwG8uqgohkFYAJfIzoriXK74pEPqqp99zmAIO8otdKeEVU6EA6NoK6LzAUa/6l8sa2cxcxNU6bbVEC/IbAWQYWGRDrUa0fNWYaNF/2aMSKbXCgH/KQQnBR8laVlNhhXArxUJGBaygSrLPL12l53tSAXPoPD6jYABtkPPkW95jyp4Zu7LrmjRCNJN/qMXl/DOl306WKzBHnftBeeICsFw6AEkoSHIEIrEJpk/jN1uLWhoOmE6o7sEn6mwVhq4/DqqCGnZZez6RwwqQ2Hiq2Agf7LXEzt5lfm3dKkaxVw4mFuieMWcxmrXYEe9MtrYwdUzssse/p5x2a+SeDgoSg2w17ZNoTUJD6ZSgxMuYJEIPzXgISqZh+ln3ZO0+Raa5yVALhrVY/FCKCuPhwDESE9i65MVlY= \ No newline at end of file + - secure: 
jiEHSPnbGaejrl6I9Aj4ZOmunzwBtLtnYLggB6W2KBVj115QLRTr2E/SkXrHINWLksV98oPs8J6E6v/LSJ7YwMQssyPmO2UjhakFNZCZpUIYeo+l8vP9LKRZhTbhav9dOG80RUIXUzqJl48GjaFrChYzdzNSXEwBhVqS/cPbEkfxZ+bPnPsuUseLjd/pFbn09CJduqhUWqv9OzjVa0cTjnVGIBDoqWp69p5M2Q8Kpf4wMsZ/gn1oww20YE/XpDrxo1bZyNLbPwsqRSK5lnwG8uqgohkFYAJfIzoriXK74pEPqqp99zmAIO8otdKeEVU6EA6NoK6LzAUa/6l8sa2cxcxNU6bbVEC/IbAWQYWGRDrUa0fNWYaNF/2aMSKbXCgH/KQQnBR8laVlNhhXArxUJGBaygSrLPL12l53tSAXPoPD6jYABtkPPkW95jyp4Zu7LrmjRCNJN/qMXl/DOl306WKzBHnftBeeICsFw6AEkoSHIEIrEJpk/jN1uLWhoOmE6o7sEn6mwVhq4/DqqCGnZZez6RwwqQ2Hiq2Agf7LXEzt5lfm3dKkaxVw4mFuieMWcxmrXYEe9MtrYwdUzssse/p5x2a+SeDgoSg2w17ZNoTUJD6ZSgxMuYJEIPzXgISqZh+ln3ZO0+Raa5yVALhrVY/FCKCuPhwDESE9i65MVlY= diff --git a/client/build.gradle b/client/build.gradle index 039945e51c..b8c443b358 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -5,5 +5,6 @@ dependencies { compile 'org.slf4j:slf4j-api:1.7.+' compile 'com.netflix.spectator:spectator-api:0.40.0' compile 'com.netflix.eureka:eureka-client:latest.release' - compile 'com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.7.5' + compile 'com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.7.5' + compile 'com.netflix.archaius:archaius-core:0.7.5' } diff --git a/client/src/main/java/com/netflix/conductor/client/task/WorkflowTaskCoordinator.java b/client/src/main/java/com/netflix/conductor/client/task/WorkflowTaskCoordinator.java index fe825b642a..dd96bc5d50 100644 --- a/client/src/main/java/com/netflix/conductor/client/task/WorkflowTaskCoordinator.java +++ b/client/src/main/java/com/netflix/conductor/client/task/WorkflowTaskCoordinator.java @@ -15,6 +15,8 @@ */ package com.netflix.conductor.client.task; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.Arrays; import java.util.HashMap; import java.util.LinkedList; @@ -46,13 +48,11 @@ /** * * @author Viren - * + * Manages the Task workers thread pool and server communication (poll, task update and acknowledgement). */ public class WorkflowTaskCoordinator { private static final Logger logger = LoggerFactory.getLogger(WorkflowTaskCoordinator.class); - - private int threadCount; private TaskClient client; @@ -63,47 +63,38 @@ public class WorkflowTaskCoordinator { private EurekaClient ec; private List workers = new LinkedList<>(); + + private int sleepWhenRetry; - /** - * 1 second - */ - private int pollInterval = 1000; - - /** - * 500 ms - */ - private int sleepWhenRetry = 500; - - private int updateRetryCount = 3; + private int updateRetryCount; - private int workerQueueSize = 100; + private int workerQueueSize; - /** - * - * @param ec Eureka client - used to identify if the server is in discovery or not. When the server goes out of discovery, the polling is terminated. If passed null, discovery check is not done. - * @param client Task client used to communicate to conductor server. - * @param threadCount # of threads assigned to the workers. Should be at-least the size of taskWorkers to avoid starvation in a busy system. - * @param taskWorkers workers that will be used for polling work and task execution. - * Please see {@link #init()} method. The method must be called after this constructor for the polling to start. - */ - public WorkflowTaskCoordinator(EurekaClient ec, TaskClient client, int threadCount, Worker...taskWorkers) { - this(ec, client, threadCount, Arrays.asList(taskWorkers)); - } + private int threadCount; + private static Map> environmentData = new HashMap<>(); /** * * @param ec Eureka client - used to identify if the server is in discovery or not. 
When the server goes out of discovery, the polling is terminated. If passed null, discovery check is not done. - * @param client TaskClient used to communicate to the conductor server + * @param client TaskClient used to communicate to the Conductor server + * @param threadCount # of threads assigned to the workers. Should be at least the size of taskWorkers to avoid starvation in a busy system. + * @param sleepWhenRetry sleep time in milliseconds for Conductor server retries (poll, ack, update task) + * @param updateRetryCount number of times to retry a failed updateTask operation + * @param workerQueueSize queue size for polled tasks. + * @param taskWorkers workers that will be used for polling work and task execution. - * +

* Please see {@link #init()} method. The method must be called after this constructor for the polling to start. + *

+ * @see Builder */ - public WorkflowTaskCoordinator(EurekaClient ec, TaskClient client, int threadCount, Iterable taskWorkers) { + public WorkflowTaskCoordinator(EurekaClient ec, TaskClient client, int threadCount, int sleepWhenRetry, int updateRetryCount, int workerQueueSize, Iterable taskWorkers) { this.ec = ec; this.client = client; this.threadCount = threadCount; + this.sleepWhenRetry = sleepWhenRetry; + this.updateRetryCount = updateRetryCount; + this.workerQueueSize = workerQueueSize; for (Worker worker : taskWorkers) { registerWorker(worker); } @@ -111,50 +102,136 @@ public WorkflowTaskCoordinator(EurekaClient ec, TaskClient client, int threadCou /** * - * @param pollInterval polling interval in millisecond. - * @return Returns the current instance. - */ - public WorkflowTaskCoordinator withPollInterval(int pollInterval) { - this.pollInterval = pollInterval; - return this; - } - - /** - * - * @param sleepWhenRetry time in millisecond, for which the thread should sleep when task update call fails, before retrying the operation. - * @return Returns the current instance. + * Builder used to create the instances of WorkflowTaskCoordinator + * */ - public WorkflowTaskCoordinator withSleepWhenRetry(int sleepWhenRetry) { - this.sleepWhenRetry = sleepWhenRetry; - return this; - } + public static class Builder { - /** - * - * @param updateRetryCount # of attempts to be made when updating task status when update status call fails. - * @return Returns the current instance. - * @see #withSleepWhenRetry(int) - */ - public WorkflowTaskCoordinator withUpdateRetryCount(int updateRetryCount) { - this.updateRetryCount = updateRetryCount; - return this; + private int sleepWhenRetry = 500; + + private int updateRetryCount = 3; + + private int workerQueueSize = 100; + + private int threadCount = -1; + + private Iterable taskWorkers; + + private EurekaClient ec; + + private TaskClient client; + + /** + * + * @param sleepWhenRetry time in millisecond, for which the thread should sleep when task update call fails, before retrying the operation. + * @return Returns the current instance. + */ + public Builder withSleepWhenRetry(int sleepWhenRetry) { + this.sleepWhenRetry = sleepWhenRetry; + return this; + } + + /** + * + * @param updateRetryCount # of attempts to be made when updating task status when update status call fails. + * @return Builder instance + * @see #withSleepWhenRetry(int) + */ + public Builder withUpdateRetryCount(int updateRetryCount) { + this.updateRetryCount = updateRetryCount; + return this; + } + + /** + * + * @param workerQueueSize Worker queue size. + * @return Builder instance + */ + public Builder withWorkerQueueSize(int workerQueueSize) { + this.workerQueueSize = workerQueueSize; + return this; + } + + /** + * + * @param threadCount # of threads assigned to the workers. Should be at-least the size of taskWorkers to avoid starvation in a busy system. + * @return Builder instance + */ + public Builder withThreadCount(int threadCount) { + this.threadCount = threadCount; + return this; + } + + /** + * + * @param client Task Client used to communicate to Conductor server + * @return Builder instance + */ + public Builder withTaskClient(TaskClient client) { + this.client = client; + return this; + } + + /** + * + * @param ec Eureka client + * @return Builder instance + */ + public Builder withEurekaClient(EurekaClient ec) { + this.ec = ec; + return this; + } + + /** + * + * @param taskWorkers workers that will be used for polling work and task execution. 
+ * @return Builder instance + */ + public Builder withWorkers(Iterable taskWorkers) { + this.taskWorkers = taskWorkers; + return this; + } + + /** + * + * @param taskWorkers workers that will be used for polling work and task execution. + * @return Builder instance + */ + public Builder withWorkers(Worker... taskWorkers) { + this.taskWorkers = Arrays.asList(taskWorkers); + return this; + } + + /** + * + * @return Builds and returns an instance of WorkflowTaskCoordinator. + *

* Please see {@link WorkflowTaskCoordinator#init()} method. The method must be called after the coordinator is built for the polling to start. +

+ */ + public WorkflowTaskCoordinator build() { + if(taskWorkers == null) { + throw new IllegalArgumentException("No task workers are specified. Use withWorkers() to add one or more task workers"); + } + + if(client == null) { + throw new IllegalArgumentException("No TaskClient provided. Use withTaskClient() to provide one"); + } + return new WorkflowTaskCoordinator(ec, client, threadCount, sleepWhenRetry, updateRetryCount, workerQueueSize, taskWorkers); + } } - /** - * - * @param workerQueueSize Worker queue size. - * @return Returns the current instance. - */ - public WorkflowTaskCoordinator withWorkerQueueSize(int workerQueueSize) { - this.workerQueueSize = workerQueueSize; - return this; - } - /** * Starts the polling */ public synchronized void init() { + if(threadCount == -1) { + threadCount = workers.size(); + } + + logger.info("Initialized the worker thread pool with {} threads", threadCount); + AtomicInteger count = new AtomicInteger(0); this.es = new ThreadPoolExecutor(threadCount, threadCount, 0L, TimeUnit.MILLISECONDS, @@ -170,7 +247,8 @@ public Thread newThread(Runnable r) { }); this.ses = Executors.newScheduledThreadPool(workers.size()); workers.forEach(worker -> { - ses.scheduleWithFixedDelay(()->pollForTask(worker), pollInterval, pollInterval, TimeUnit.MILLISECONDS); + environmentData.put(worker, getEnvData(worker)); + ses.scheduleWithFixedDelay(()->pollForTask(worker), worker.getPollingInterval(), worker.getPollingInterval(), TimeUnit.MILLISECONDS); }); } @@ -183,7 +261,6 @@ public Thread newThread(Runnable r) { */ public void registerWorker(Worker worker) { workers.add(worker); - this.threadCount++; } private void pollForTask(Worker worker) { @@ -249,12 +326,12 @@ private void execute(Worker worker, Task task) { } TaskResult result = new TaskResult(task); - result.getLog().getEnvironment().putAll(getEnvData(worker)); + result.getLog().getEnvironment().putAll(environmentData.get(worker)); Stopwatch sw = WorkflowTaskMetrics.executionTimer(worker.getTaskDefName()); try { - logger.debug("Executing task {} on worker {}", task, worker.getClass().getSimpleName()); + logger.debug("Executing task {} on worker {}", task, worker.getClass().getSimpleName()); result = worker.execute(task); } catch (Exception e) { @@ -278,19 +355,57 @@ private void execute(Worker worker, Task task) { } - private Map getEnvData(Worker worker) { - String props = worker.getLoggingEnvProps(); + /** + * + * @return Thread count for the executor pool + */ + public int getThreadCount() { + return threadCount; + } + + /** + * + * @return Size of the queue used by the executor pool + */ + public int getWorkerQueueSize() { + return workerQueueSize; + } + + /** + * + * @return sleep time in milliseconds before a task update retry is done when receiving an error from the Conductor server + */ + public int getSleepWhenRetry() { + return sleepWhenRetry; + } + + /** + * + * @return Number of times updateTask should be retried when receiving an error from the Conductor server + */ + public int getUpdateRetryCount() { + return updateRetryCount; + } + + static Map getEnvData(Worker worker) { + List props = worker.getLoggingEnvProps(); Map data = new HashMap<>(); - if(props == null || props.trim().length() == 0) { + if(props == null || props.isEmpty()) { return data; - } - String[] properties = props.split(","); + } String workerName = worker.getTaskDefName(); - for(String property : properties) { - String value = PropertyFactory.getString(workerName, property, System.getenv(property)); + for(String property : props) { + property = property.trim(); + String defaultValue = System.getenv(property); + String value = PropertyFactory.getString(workerName, property, defaultValue); data.put(property, value); } + try { + data.put("HOSTNAME", InetAddress.getLocalHost().getHostName()); + } catch (UnknownHostException e) { + //hostname could not be resolved; proceed without logging it + } return data; } @@ -316,7 +431,4 @@ private void updateWithRetry(int count, Task task, TaskResult result, Worker wor } } } - - - }
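A usage sketch of the new Builder: the class name BuilderUsageSketch, the task name task_1, and the 250 ms override are illustrative; Worker.create(...) comes from the new tests in this change, and the conductor.worker.<name>.<property> key convention is the one exercised in TestPropertyFactory, assuming the Archaius-backed PropertyFactory (archaius-core is added to client/build.gradle above) picks up system properties.

    import com.netflix.conductor.client.http.TaskClient;
    import com.netflix.conductor.client.task.WorkflowTaskCoordinator;
    import com.netflix.conductor.client.worker.Worker;
    import com.netflix.conductor.common.metadata.tasks.Task;
    import com.netflix.conductor.common.metadata.tasks.TaskResult;

    public class BuilderUsageSketch {
        public static void main(String[] args) {
            //Optional per-worker override of the new Worker#getPollingInterval() default (1000 ms);
            //assumes PropertyFactory resolves the conductor.worker.<taskDefName>.<property> key
            System.setProperty("conductor.worker.task_1.pollInterval", "250");

            TaskClient taskClient = new TaskClient();
            taskClient.setRootURI("http://localhost:8080/api/"); //point this to the server API

            Worker worker = Worker.create("task_1", (Task task) -> new TaskResult(task));

            WorkflowTaskCoordinator coordinator = new WorkflowTaskCoordinator.Builder()
                    .withTaskClient(taskClient) //required; build() throws IllegalArgumentException without it
                    .withWorkers(worker)        //required; one or more workers
                    .withThreadCount(2)         //optional; defaults to the number of registered workers
                    .build();

            //Polling starts only after init() is called
            coordinator.init();
        }
    }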
diff --git a/client/src/main/java/com/netflix/conductor/client/worker/Worker.java b/client/src/main/java/com/netflix/conductor/client/worker/Worker.java index 95d3fcb6e7..fc04b3f00e 100644 --- a/client/src/main/java/com/netflix/conductor/client/worker/Worker.java +++ b/client/src/main/java/com/netflix/conductor/client/worker/Worker.java @@ -17,6 +17,8 @@ import java.net.InetAddress; import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.List; import java.util.function.Function; import com.netflix.conductor.common.metadata.tasks.Task; @@ -95,10 +97,19 @@ public default int getPollCount() { /** * - * @return Returns a comma separated list of environment variables that should be logged + * @return Interval in milliseconds at which the server should be polled for worker tasks. */ - public default String getLoggingEnvProps() { - return PropertyFactory.getString(getTaskDefName(), "taskLogProps", "HOSTNAME,USER,EC2_INSTANCE_ID,@environment,@stack"); + public default int getPollingInterval() { + return PropertyFactory.getInteger(getTaskDefName(), "pollInterval", 1000); + } + + /** + * + * @return Returns a list of environment or system variables that should be logged + */ + public default List getLoggingEnvProps() { + String keys = PropertyFactory.getString(getTaskDefName(), "taskLogProps", "HOSTNAME,USER,EC2_INSTANCE_ID"); + return Arrays.asList(keys.split(",")); } /** * diff --git a/client/src/test/java/com/netflix/conductor/client/sample/Main.java b/client/src/test/java/com/netflix/conductor/client/sample/Main.java index 37a80d2207..a1c2ad10b8 100644 --- a/client/src/test/java/com/netflix/conductor/client/sample/Main.java +++ b/client/src/test/java/com/netflix/conductor/client/sample/Main.java @@ -21,7 +21,6 @@ import com.netflix.conductor.client.http.TaskClient; import com.netflix.conductor.client.task.WorkflowTaskCoordinator; import com.netflix.conductor.client.worker.Worker; -import com.netflix.discovery.EurekaClient; /** * @author Viren @@ -31,8 +30,6 @@ public class Main { public static void main(String[] args) { - EurekaClient eurekaClient = null; //Optional and can be null - TaskClient taskClient = new TaskClient(); taskClient.setRootURI("http://localhost:8080/api/"); //Point this to the server API @@ -41,8 +38,9 @@ public static void main(String[] args) { Worker worker1 = new SampleWorker("task_1"); Worker worker2 = new SampleWorker("task_5"); - //Initialize the task coordinator - WorkflowTaskCoordinator coordinator = new WorkflowTaskCoordinator(eurekaClient , taskClient, threadCount, worker1, worker2); + //Create WorkflowTaskCoordinator + WorkflowTaskCoordinator.Builder builder = new WorkflowTaskCoordinator.Builder(); + WorkflowTaskCoordinator coordinator = builder.withWorkers(worker1, worker2).withThreadCount(threadCount).withTaskClient(taskClient).build(); //Start for polling and execution of the tasks coordinator.init();
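Class-based workers plug into the same coordinator through the new default methods; a minimal sketch (MyWorker and my_task are illustrative names, not part of this change):

    import com.netflix.conductor.client.worker.Worker;
    import com.netflix.conductor.common.metadata.tasks.Task;
    import com.netflix.conductor.common.metadata.tasks.TaskResult;

    public class MyWorker implements Worker {

        @Override
        public String getTaskDefName() {
            return "my_task";
        }

        @Override
        public TaskResult execute(Task task) {
            //do the actual work here, then report the result back to the server
            return new TaskResult(task);
        }

        @Override
        public int getPollingInterval() {
            //override the 1000 ms default directly instead of via the pollInterval property
            return 250;
        }

        //getLoggingEnvProps() is inherited: HOSTNAME, USER and EC2_INSTANCE_ID by default
    }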
diff --git a/client/src/test/java/com/netflix/conductor/client/task/WorkflowTaskCoordinatorTests.java b/client/src/test/java/com/netflix/conductor/client/task/WorkflowTaskCoordinatorTests.java new file mode 100644 index 0000000000..bec09212e0 --- /dev/null +++ b/client/src/test/java/com/netflix/conductor/client/task/WorkflowTaskCoordinatorTests.java @@ -0,0 +1,92 @@ +/** + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * + */ +package com.netflix.conductor.client.task; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.junit.Test; + +import com.netflix.conductor.client.http.TaskClient; +import com.netflix.conductor.client.worker.Worker; +import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.tasks.TaskResult; + +/** + * @author Viren + * + */ +public class WorkflowTaskCoordinatorTests { + + @Test + public void testLoggingEnvironment() { + Worker worker = Worker.create("test", (Task task)-> new TaskResult(task)); + List keys = worker.getLoggingEnvProps(); + + Map env = WorkflowTaskCoordinator.getEnvData(worker); + assertNotNull(env); + assertTrue(!env.isEmpty()); + Set loggedKeys = env.keySet(); + for(String key : keys) { + assertTrue(loggedKeys.contains(key)); + } + } + + @Test(expected=IllegalArgumentException.class) + public void testNoWorkersException() { + new WorkflowTaskCoordinator.Builder().build(); + } + + @Test + public void testThreadPool() { + + Worker worker = Worker.create("test", (Task task)-> new TaskResult(task)); + WorkflowTaskCoordinator coordinator = new WorkflowTaskCoordinator.Builder().withWorkers(worker, worker, worker).withTaskClient(new TaskClient()).build(); + assertEquals(-1, coordinator.getThreadCount()); //Not initialized yet + coordinator.init(); + assertEquals(3, coordinator.getThreadCount()); + assertEquals(100, coordinator.getWorkerQueueSize()); //100 is the default value + assertEquals(500, coordinator.getSleepWhenRetry()); + assertEquals(3, coordinator.getUpdateRetryCount()); + + + coordinator = new WorkflowTaskCoordinator.Builder() + .withWorkers(worker) + .withThreadCount(100) + .withWorkerQueueSize(400) + .withSleepWhenRetry(100) + .withUpdateRetryCount(10) + .withTaskClient(new TaskClient()) + .build(); + assertEquals(100, coordinator.getThreadCount()); + coordinator.init(); + assertEquals(100, coordinator.getThreadCount()); + assertEquals(400, coordinator.getWorkerQueueSize()); + assertEquals(100, coordinator.getSleepWhenRetry()); + assertEquals(10, coordinator.getUpdateRetryCount()); + + + + } +} diff --git a/client/src/test/java/com/netflix/conductor/client/worker/TestPropertyFactory.java b/client/src/test/java/com/netflix/conductor/client/worker/TestPropertyFactory.java index bf2f4e7748..48ca3a8f5a 100644 --- a/client/src/test/java/com/netflix/conductor/client/worker/TestPropertyFactory.java +++ b/client/src/test/java/com/netflix/conductor/client/worker/TestPropertyFactory.java @@ -23,7 +23,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import
org.junit.BeforeClass; import org.junit.Test; import com.netflix.conductor.common.metadata.tasks.Task; @@ -34,20 +33,6 @@ * */ public class TestPropertyFactory { - - @BeforeClass - public static void init() { - - //Polling interval for all the workers is 2 second - System.setProperty("conductor.worker.pollingInterval", "2"); - - System.setProperty("conductor.worker.paused", "false"); - System.setProperty("conductor.worker.workerA.paused", "true"); - - System.setProperty("conductor.worker.workerB.batchSize", "84"); - - System.setProperty("conductor.worker.Test.paused", "true"); - } @Test public void testIdentity(){ diff --git a/client/src/test/resources/config.properties b/client/src/test/resources/config.properties new file mode 100644 index 0000000000..eb652cb17a --- /dev/null +++ b/client/src/test/resources/config.properties @@ -0,0 +1,5 @@ +conductor.worker.pollingInterval=2 +conductor.worker.paused=false +conductor.worker.workerA.paused=true +conductor.worker.workerB.batchSize=84 +conductor.worker.Test.paused=true diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java index 88c2b14286..ffef9d5f5e 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java @@ -499,18 +499,24 @@ public void setOutputData(Map outputData) { public Task copy() { Task copy = new Task(); - copy.setCallbackAfterSeconds(getCallbackAfterSeconds()); + copy.setCallbackAfterSeconds(callbackAfterSeconds); copy.setCallbackFromWorker(callbackFromWorker); copy.setCorrelationId(correlationId); copy.setDynamicWorkflowTask(dynamicWorkflowTask); copy.setInputData(inputData); - copy.setOutputData(getOutputData()); + copy.setOutputData(outputData); copy.setReferenceTaskName(referenceTaskName); copy.setStartDelayInSeconds(startDelayInSeconds); copy.setTaskDefName(taskDefName); copy.setTaskType(taskType); - copy.setWorkflowInstanceId(getWorkflowInstanceId()); + copy.setWorkflowInstanceId(workflowInstanceId); copy.setResponseTimeoutSeconds(responseTimeoutSeconds); + copy.setStatus(status); + copy.setRetryCount(retryCount); + copy.setPollCount(pollCount); + copy.setTaskId(taskId); + copy.setReasonForIncompletion(reasonForIncompletion); + copy.setWorkerId(workerId); return copy; } diff --git a/common/src/main/java/com/netflix/conductor/common/run/Workflow.java b/common/src/main/java/com/netflix/conductor/common/run/Workflow.java index ff3a4ca842..2bc9307523 100644 --- a/common/src/main/java/com/netflix/conductor/common/run/Workflow.java +++ b/common/src/main/java/com/netflix/conductor/common/run/Workflow.java @@ -74,6 +74,8 @@ public boolean isSuccessful(){ private String reasonForIncompletion; private int schemaVersion; + + private String event; public Workflow(){ @@ -269,6 +271,23 @@ public int getSchemaVersion() { public void setSchemaVersion(int schemaVersion) { this.schemaVersion = schemaVersion; } + + /** + * + * @return Name of the event that started the workflow + */ + public String getEvent() { + return event; + } + + /** + * + * @param event Name of the event that started the workflow + */ + public void setEvent(String event) { + this.event = event; + } + @Override public String toString() { return workflowType + "." + version + "/" + workflowId + "." 
+ status; diff --git a/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java b/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java index cbbfda6e18..6b7b062d86 100644 --- a/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java +++ b/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java @@ -59,6 +59,8 @@ public class WorkflowSummary { private long executionTime; + private String event; + public WorkflowSummary() { } @@ -87,6 +89,7 @@ public WorkflowSummary(Workflow workflow) { if(workflow.getEndTime() > 0){ this.executionTime = workflow.getEndTime() - workflow.getStartTime(); } + this.event = workflow.getEvent(); } /** @@ -174,4 +177,20 @@ public long getExecutionTime(){ public String getUpdateTime() { return updateTime; } + + /** + * + * @return The event + */ + public String getEvent() { + return event; + } + + /** + * + * @param event The event + */ + public void setEvent(String event) { + this.event = event; + } } diff --git a/core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java b/core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java index c9d8b14c3f..37e798cdeb 100644 --- a/core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java +++ b/core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java @@ -66,7 +66,7 @@ public ActionProcessor(WorkflowExecutor executor, MetadataService metadata) { this.metadata = metadata; } - public Map execute(Action action, String payload) throws Exception { + public Map execute(Action action, String payload, String event, String messageId) throws Exception { logger.debug("Executing {}", action.getAction()); Object jsonObj = om.readValue(payload, Object.class); @@ -76,13 +76,13 @@ public Map execute(Action action, String payload) throws Excepti switch (action.getAction()) { case start_workflow: - Map op = startWorkflow(action, jsonObj); + Map op = startWorkflow(action, jsonObj, event, messageId); return op; case complete_task: - op = completeTask(action, jsonObj, action.getComplete_task(), Status.COMPLETED); + op = completeTask(action, jsonObj, action.getComplete_task(), Status.COMPLETED, event, messageId); return op; case fail_task: - op = completeTask(action, jsonObj, action.getFail_task(), Status.FAILED); + op = completeTask(action, jsonObj, action.getFail_task(), Status.FAILED, event, messageId); return op; default: break; @@ -91,7 +91,7 @@ public Map execute(Action action, String payload) throws Excepti } - private Map completeTask(Action action, Object payload, TaskDetails taskDetails, Status status) { + private Map completeTask(Action action, Object payload, TaskDetails taskDetails, Status status, String event, String messageId) { Map input = new HashMap<>(); input.put("workflowId", taskDetails.getWorkflowId()); @@ -114,6 +114,9 @@ private Map completeTask(Action action, Object payload, TaskDeta task.setStatus(status); task.setOutputData(replaced); + task.getOutputData().put("__event", event); + task.getOutputData().put("__messageId", messageId); + try { executor.updateTask(new TaskResult(task)); } catch (Exception e) { @@ -123,7 +126,7 @@ private Map completeTask(Action action, Object payload, TaskDeta return replaced; } - private Map startWorkflow(Action action, Object payload) throws Exception { + private Map startWorkflow(Action action, Object payload, String event, String messageId) throws Exception { StartWorkflow params = action.getStart_workflow(); Map op = new HashMap<>(); try { @@ 
-131,9 +134,11 @@ private Map startWorkflow(Action action, Object payload) throws WorkflowDef def = metadata.getWorkflowDef(params.getName(), params.getVersion()); Map inputParams = params.getInput(); Map workflowInput = pu.replace(inputParams, payload); - String id = executor.startWorkflow(def.getName(), def.getVersion(), params.getCorrelationId(), workflowInput); + workflowInput.put("__messageId", messageId); + String id = executor.startWorkflow(def.getName(), def.getVersion(), params.getCorrelationId(), workflowInput, event); op.put("workflowId", id); + }catch(Exception e) { logger.error(e.getMessage(), e); op.put("error", e.getMessage()); diff --git a/core/src/main/java/com/netflix/conductor/core/events/EventProcessor.java b/core/src/main/java/com/netflix/conductor/core/events/EventProcessor.java index 2bd160871b..fe3cfcb4d9 100644 --- a/core/src/main/java/com/netflix/conductor/core/events/EventProcessor.java +++ b/core/src/main/java/com/netflix/conductor/core/events/EventProcessor.java @@ -202,7 +202,7 @@ private Future execute(EventExecution ee, Action action, String payload) { try { logger.debug("Executing {} with payload {}", action.getAction(), payload); - Map output = ap.execute(action, payload); + Map output = ap.execute(action, payload, ee.getEvent(), ee.getMessageId()); if(output != null) { ee.getOutput().putAll(output); } diff --git a/core/src/main/java/com/netflix/conductor/core/events/EventQueues.java b/core/src/main/java/com/netflix/conductor/core/events/EventQueues.java index 48ad5fe825..f62dddaccb 100644 --- a/core/src/main/java/com/netflix/conductor/core/events/EventQueues.java +++ b/core/src/main/java/com/netflix/conductor/core/events/EventQueues.java @@ -27,6 +27,7 @@ import org.slf4j.LoggerFactory; import com.netflix.conductor.core.events.queue.ObservableQueue; +import com.netflix.conductor.core.execution.ParametersUtils; /** * @author Viren @@ -36,6 +37,8 @@ public class EventQueues { private static Logger logger = LoggerFactory.getLogger(EventQueues.class); + private static ParametersUtils pu = new ParametersUtils(); + public enum QueueType { sqs, conductor } @@ -54,7 +57,8 @@ public static List providers() { return providers.values().stream().map(p -> p.getClass().getName()).collect(Collectors.toList()); } - public static ObservableQueue getQueue(String event, boolean throwException) { + public static ObservableQueue getQueue(String eventt, boolean throwException) { + String event = pu.replace(eventt).toString(); String typeVal = event.substring(0, event.indexOf(':')); String queueURI = event.substring(event.indexOf(':') + 1); QueueType type = QueueType.valueOf(typeVal); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/DeciderService.java b/core/src/main/java/com/netflix/conductor/core/execution/DeciderService.java index 9f3eed419b..73d7a3c5d1 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/DeciderService.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/DeciderService.java @@ -48,16 +48,15 @@ import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.common.run.Workflow.WorkflowStatus; -import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask; import com.netflix.conductor.core.utils.IDGenerator; -import com.netflix.conductor.dao.ExecutionDAO; import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.metrics.Monitors; /** * @author Viren * @author Vikram - * + * Decider evaluates the 
state of the workflow by inspecting the current state along with the blueprint. + * The result of the evaluation is either to schedule further tasks, complete/fail the workflow or do nothing. */ public class DeciderService { @@ -67,202 +66,140 @@ public class DeciderService { private MetadataDAO metadata; - private ExecutionDAO edao; - private ObjectMapper om; private ParametersUtils pu = new ParametersUtils(); @Inject - public DeciderService(MetadataDAO metadata, ExecutionDAO edao, ObjectMapper om) { + public DeciderService(MetadataDAO metadata, ObjectMapper om) { this.metadata = metadata; - this.edao = edao; this.om = om; } - - @VisibleForTesting - public DeciderService() { - } - - @VisibleForTesting - void setMetadata(MetadataDAO metadata) { - this.metadata = metadata; - } - - List startWorkflow(Workflow workflow, WorkflowDef def) throws Exception { - - logger.debug("Starting workflow " + def.getName() + "/" + workflow.getWorkflowId()); + public DeciderOutcome decide(Workflow workflow, WorkflowDef def) throws TerminateWorkflow { - List tasks = workflow.getTasks(); - // Check if the workflow is a re-run case - if (workflow.getReRunFromWorkflowId() == null || tasks.isEmpty()) { - if(def.getTasks().isEmpty()) { - //There are no tasks in a workflow - workflow.setStatus(WorkflowStatus.COMPLETED); - return Collections.emptyList(); - } - WorkflowTask taskToSchedule = def.getTasks().getFirst(); //Nothing is running yet - so schedule the first task - while(isTaskSkipped(taskToSchedule, workflow)){ - taskToSchedule = def.getNextTask(taskToSchedule.getTaskReferenceName()); - } - List toBeScheduled = getTasksToBeScheduled(def, workflow, taskToSchedule, 0, workflow.getStartTime()); - return toBeScheduled; - } - - // Get the first task to schedule - Task rerunFromTask = null; - for(Task t: tasks){ - if(t.getStatus().equals(Status.READY_FOR_RERUN)){ - rerunFromTask = t; - break; - } - } - if(rerunFromTask == null){ - String reason = String.format("The workflow %s is marked for re-run from %s but could not find the starting task", workflow.getWorkflowId(), workflow.getReRunFromWorkflowId()); - throw new TerminateWorkflow(reason); - } - rerunFromTask.setStatus(Status.SCHEDULED); - rerunFromTask.setRetried(true); - rerunFromTask.setRetryCount(0); - return Arrays.asList(rerunFromTask); - - } - - /** - * - * @param workflowId id of the workflow - * @param executor Workflow executor - * @return true if the workflow status was terminal. false otherwise. 
- * @throws Exception if there is an internal error - */ - public boolean decide(String workflowId, WorkflowExecutor executor) throws Exception { - - final Workflow workflow = edao.getWorkflow(workflowId, true); - final WorkflowDef def = metadata.get(workflow.getWorkflowType(), workflow.getVersion()); workflow.setSchemaVersion(def.getSchemaVersion()); - try { - - final List tasks = workflow.getTasks(); - List executedTasks = tasks.stream().filter(t -> !t.getStatus().equals(Status.SKIPPED) && !t.getStatus().equals(Status.READY_FOR_RERUN)).collect(Collectors.toList()); - List tasksToBeScheduled = new LinkedList<>(); - //If the task list is empty, then - if(executedTasks.isEmpty()){ - tasksToBeScheduled = startWorkflow(workflow, def); - if(workflow.getStatus().isTerminal()) { - edao.updateWorkflow(workflow); - } - if(tasksToBeScheduled == null) tasksToBeScheduled = new LinkedList<>(); - } - decide(def, workflow, tasksToBeScheduled, executor); - - if(workflow.getStatus().isTerminal()) { - return true; - } - - return false; - - } catch (TerminateWorkflow tw) { - terminate(def, workflow, tw, executor); - return true; - } + final List tasks = workflow.getTasks(); + List executedTasks = tasks.stream().filter(t -> !t.getStatus().equals(Status.SKIPPED) && !t.getStatus().equals(Status.READY_FOR_RERUN)).collect(Collectors.toList()); + List tasksToBeScheduled = new LinkedList<>(); + if (executedTasks.isEmpty()) { + tasksToBeScheduled = startWorkflow(workflow, def); + if(tasksToBeScheduled == null) tasksToBeScheduled = new LinkedList<>(); + } + return decide(def, workflow, tasksToBeScheduled); } - void decide(final WorkflowDef def, final Workflow workflow, List preScheduledTasks, WorkflowExecutor workflowProvider) throws Exception { + private DeciderOutcome decide(final WorkflowDef def, final Workflow workflow, List preScheduledTasks) throws TerminateWorkflow { - if (workflow.getStatus().equals(WorkflowStatus.PAUSED)) { - logger.debug("Workflow " + workflow.getWorkflowId() + " is paused"); - return; + DeciderOutcome outcome = new DeciderOutcome(); + + if (workflow.getStatus().equals(WorkflowStatus.PAUSED)) { + logger.debug("Workflow " + workflow.getWorkflowId() + " is paused"); + return outcome; } if (workflow.getStatus().isTerminal()) { - // you cannot evaluate a terminal workflow + //you cannot evaluate a terminal workflow logger.debug("Workflow " + workflow.getWorkflowId() + " is already finished. 
status=" + workflow.getStatus() + ", reason=" + workflow.getReasonForIncompletion()); - workflowProvider.cleanupFromPending(workflow); - return; + return outcome; } List pendingTasks = workflow.getTasks().stream().filter(t -> (!t.isRetried() && !t.getStatus().equals(Status.SKIPPED)) || SystemTaskType.isBuiltIn(t.getTaskType())).collect(Collectors.toList()); - boolean reeval = false; Set executedTaskRefNames = workflow.getTasks().stream() .filter(t -> !t.getStatus().equals(Status.SKIPPED) && !t.getStatus().equals(Status.READY_FOR_RERUN)) .map(t -> t.getReferenceTaskName()).collect(Collectors.toSet()); - List systemTasksExecuted = new LinkedList(); Map tasksToBeScheduled = new LinkedHashMap<>(); preScheduledTasks.forEach(pst -> { - executedTaskRefNames.remove(pst.getReferenceTaskName()); - tasksToBeScheduled.put(pst.getReferenceTaskName(), pst); - if(SystemTaskType.is(pst.getTaskType())){ - systemTasksExecuted.add(pst); - } - }); - - List update = new LinkedList<>(); + executedTaskRefNames.remove(pst.getReferenceTaskName()); + tasksToBeScheduled.put(pst.getReferenceTaskName(), pst); + }); for (Task task : pendingTasks) { - + if (SystemTaskType.is(task.getTaskType()) && !task.getStatus().isTerminal()) { - WorkflowSystemTask stt = WorkflowSystemTask.get(task.getTaskType()); - if (stt.execute(workflow, task, workflowProvider)) { - update.add(task); - reeval = true; - systemTasksExecuted.add(task); - } + tasksToBeScheduled.put(task.getReferenceTaskName(), task); + executedTaskRefNames.remove(task.getReferenceTaskName()); } + TaskDef taskDef = metadata.getTaskDef(task.getTaskDefName()); if(taskDef != null) { checkForTimeout(taskDef, task); } if (!task.getStatus().isSuccessful()) { - List retryTasks = shouldTaskRetry(def, taskDef, workflow, task); - retryTasks.forEach(rt -> { - tasksToBeScheduled.put(rt.getReferenceTaskName(), rt); - executedTaskRefNames.remove(rt.getReferenceTaskName()); - }); - workflow.getTasks().addAll(retryTasks); - update.add(task); + WorkflowTask workflowTask = def.getTaskByRefName(task.getReferenceTaskName()); + Task rt = retry(taskDef, workflowTask, task, workflow); + tasksToBeScheduled.put(rt.getReferenceTaskName(), rt); + executedTaskRefNames.remove(rt.getReferenceTaskName()); + outcome.tasksToBeUpdated.add(task); } if (!task.isRetried() && task.getStatus().isTerminal()) { task.setRetried(true); List nextTasks = getNextTask(def, workflow, task); nextTasks.forEach(rt -> tasksToBeScheduled.put(rt.getReferenceTaskName(), rt)); - update.add(task); + outcome.tasksToBeUpdated.add(task); logger.debug("Scheduling Tasks from " + task.getTaskDefName() + ", next = " + nextTasks.stream().map(t -> t.getTaskDefName()).collect(Collectors.toList())); } } List unScheduledTasks = tasksToBeScheduled.values().stream().filter(tt -> !executedTaskRefNames.contains(tt.getReferenceTaskName())).collect(Collectors.toList()); - int pushedToQueue = -1; if (!unScheduledTasks.isEmpty()) { logger.debug("Scheduling Tasks " + unScheduledTasks.stream().map(t -> t.getTaskDefName()).collect(Collectors.toList())); - pushedToQueue = workflowProvider.scheduleTask(unScheduledTasks); - workflow.getTasks().addAll(unScheduledTasks); + outcome.tasksToBeScheduled.addAll(unScheduledTasks); } - - systemTasksExecuted.forEach(t -> {update.add(t);}); - edao.updateTasks(update); - - if(checkForWorkflowCompletion(def, workflow)){ + + if (checkForWorkflowCompletion(def, workflow)) { logger.debug("Marking workflow as complete. 
workflow=" + workflow.getWorkflowId() + ", tasks=" + workflow.getTasks()); - workflowProvider.completeWorkflow(workflow); - }else{ - edao.updateWorkflow(workflow); + outcome.isComplete = true; } + + return outcome; + + } + + private List startWorkflow(Workflow workflow, WorkflowDef def) throws TerminateWorkflow { + + logger.debug("Starting workflow " + def.getName() + "/" + workflow.getWorkflowId()); - if (pushedToQueue == 0 || reeval) { - //Nothing was pushed to queue - need to re-evaluate workflow - decide(def, workflow, Collections.emptyList(), workflowProvider); + List tasks = workflow.getTasks(); + // Check if the workflow is a re-run case + if (workflow.getReRunFromWorkflowId() == null || tasks.isEmpty()) { + if(def.getTasks().isEmpty()) { + throw new TerminateWorkflow("No tasks found to be executed", WorkflowStatus.COMPLETED); + } + WorkflowTask taskToSchedule = def.getTasks().getFirst(); //Nothing is running yet - so schedule the first task + while (isTaskSkipped(taskToSchedule, workflow)) { + taskToSchedule = def.getNextTask(taskToSchedule.getTaskReferenceName()); + } + List toBeScheduled = getTasksToBeScheduled(def, workflow, taskToSchedule, 0, workflow.getStartTime()); + return toBeScheduled; + } + + // Get the first task to schedule + Task rerunFromTask = null; + for(Task t: tasks){ + if(t.getStatus().equals(Status.READY_FOR_RERUN)){ + rerunFromTask = t; + break; + } } + if (rerunFromTask == null) { + String reason = String.format("The workflow %s is marked for re-run from %s but could not find the starting task", workflow.getWorkflowId(), workflow.getReRunFromWorkflowId()); + throw new TerminateWorkflow(reason); + } + rerunFromTask.setStatus(Status.SCHEDULED); + rerunFromTask.setRetried(true); + rerunFromTask.setRetryCount(0); + return Arrays.asList(rerunFromTask); } - boolean checkForWorkflowCompletion(final WorkflowDef def, final Workflow workflow) throws Exception { + private boolean checkForWorkflowCompletion(final WorkflowDef def, final Workflow workflow) throws TerminateWorkflow { List allTasks = workflow.getTasks(); if (allTasks.isEmpty()) { @@ -303,34 +240,15 @@ boolean checkForWorkflowCompletion(final WorkflowDef def, final Workflow workflo return false; } - private void terminate(final WorkflowDef def, final Workflow workflow, TerminateWorkflow tw, WorkflowExecutor workflowProvider) throws Exception { - - if (!workflow.getStatus().isTerminal()) { - workflow.setStatus(tw.workflowStatus); - } - - String failureWorkflow = def.getFailureWorkflow(); - if (failureWorkflow != null) { - if (failureWorkflow.startsWith("$")) { - String[] paramPathComponents = failureWorkflow.split("\\."); - String name = paramPathComponents[2]; // name of the input parameter - failureWorkflow = (String) workflow.getInput().get(name); - } - } - if(tw.task != null){ - edao.updateTask(tw.task); - } - workflowProvider.terminateWorkflow(workflow, tw.getMessage(), failureWorkflow); - } - List getNextTask(WorkflowDef def, Workflow workflow, Task task) { // Get the following task after the last completed task if(SystemTaskType.is(task.getTaskType()) && SystemTaskType.DECISION.name().equals(task.getTaskType())){ - if(task.getInputData().get("hasChildren") != null){ + if (task.getInputData().get("hasChildren") != null) { return Collections.emptyList(); } } + String taskReferenceName = task.getReferenceTaskName(); WorkflowTask taskToSchedule = def.getNextTask(taskReferenceName); while (isTaskSkipped(taskToSchedule, workflow)) { @@ -344,7 +262,7 @@ List getNextTask(WorkflowDef def, Workflow workflow, Task 
task) { } - String getNextTasksToBeScheduled(WorkflowDef def, Workflow workflow, Task task) { + private String getNextTasksToBeScheduled(WorkflowDef def, Workflow workflow, Task task) { String taskReferenceName = task.getReferenceTaskName(); WorkflowTask taskToSchedule = def.getNextTask(taskReferenceName); @@ -355,8 +273,8 @@ String getNextTasksToBeScheduled(WorkflowDef def, Workflow workflow, Task task) } - - private List shouldTaskRetry(WorkflowDef def, TaskDef taskDef, Workflow workflow, Task task) throws Exception { + + private Task retry(TaskDef taskDef, WorkflowTask workflowTask, Task task, Workflow workflow) throws TerminateWorkflow { int retryCount = task.getRetryCount(); if (!task.getStatus().isRetriable() || SystemTaskType.isBuiltIn(task.getTaskType()) || taskDef.getRetryCount() <= retryCount) { @@ -375,28 +293,28 @@ private List shouldTaskRetry(WorkflowDef def, TaskDef taskDef, Workflow wo startDelay = taskDef.getRetryDelaySeconds() * (1 + task.getRetryCount()); break; } - task.setRetried(true); - WorkflowTask taskToSchedule = def.getTaskByRefName(task.getReferenceTaskName()); - if(taskToSchedule == null){ - taskToSchedule = task.getDynamicWorkflowTask(); - } - if(taskToSchedule == null){ - logger.warn("taskToSchedule is still null...." + task.getWorkflowInstanceId() + "/" + task.getTaskId()); - WorkflowStatus status = task.getStatus().equals(Status.TIMED_OUT) ? WorkflowStatus.TIMED_OUT : WorkflowStatus.FAILED; - throw new TerminateWorkflow(task.getReasonForIncompletion(), status, task); - } - taskToSchedule.setStartDelay(startDelay); - List tasksTobeScheduled = getTasksToBeScheduled(def, workflow, taskToSchedule, task.getRetryCount()+1, task.getEndTime(), null, task.getTaskId()); - tasksTobeScheduled.stream().filter(t -> t.getReferenceTaskName().equals(task.getReferenceTaskName())).forEach(tbs -> { - tbs.setInputData(task.getInputData()); - tbs.setDynamicWorkflowTask(task.getDynamicWorkflowTask()); - }); - return tasksTobeScheduled; + task.setRetried(true); + Task rescheduled = task.copy(); + rescheduled.setStartDelayInSeconds(startDelay); + rescheduled.setCallbackAfterSeconds(startDelay); + rescheduled.setRetryCount(task.getRetryCount() + 1); + rescheduled.setRetried(false); + rescheduled.setTaskId(IDGenerator.generate()); + rescheduled.setRetriedTaskId(task.getTaskId()); + rescheduled.setStatus(Status.SCHEDULED); + + if(workflowTask != null && workflow.getSchemaVersion() > 1) { //This is a valid case, for the dynamic fork/join + Map taskInput = pu.getTaskInputV2(workflowTask.getInputParameters(), workflow, rescheduled.getTaskId(), taskDef); + rescheduled.setInputData(taskInput); + } + return rescheduled; + } private void checkForTimeout(TaskDef taskType, Task task) { + if(taskType == null){ logger.warn("missing task type " + task.getTaskDefName() + ", workflowId=" + task.getWorkflowInstanceId()); return; @@ -431,8 +349,6 @@ private void checkForTimeout(TaskDef taskType, Task task) { } return; - - } private List getTasksToBeScheduled(WorkflowDef def, Workflow workflow, WorkflowTask taskToSchedule, int retryCount, long lastEventTime) { @@ -558,9 +474,10 @@ private List getTasksToBeScheduled(WorkflowDef def, Workflow workflow, Wor break; case EVENT: if(taskId == null) taskId = IDGenerator.generate(); + taskToSchedule.getInputParameters().put("sink", taskToSchedule.getSink()); Map eventTaskInput = pu.getTaskInputV2(taskToSchedule.getInputParameters(), workflow, taskId, null); - Task eventTask = SystemTask.eventTask(workflow.getWorkflowId(), taskId, - workflow.getCorrelationId(), 
taskToSchedule.getTaskReferenceName(), taskToSchedule.getSink(), eventTaskInput); + String sink = (String)eventTaskInput.get("sink"); + Task eventTask = SystemTask.eventTask(workflow.getWorkflowId(), taskId, workflow.getCorrelationId(), taskToSchedule.getTaskReferenceName(), sink, eventTaskInput); tasks.add(eventTask); break; case WAIT: @@ -743,4 +660,16 @@ private boolean isTaskSkipped(WorkflowTask taskToSchedule, Workflow workflow) { } } + + public static class DeciderOutcome { + + List tasksToBeScheduled = new LinkedList<>(); + + List tasksToBeUpdated = new LinkedList<>(); + + boolean isComplete; + + private DeciderOutcome() { } + + } } diff --git a/core/src/main/java/com/netflix/conductor/core/execution/ParametersUtils.java b/core/src/main/java/com/netflix/conductor/core/execution/ParametersUtils.java index d1b4d0446f..13b58402ca 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/ParametersUtils.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/ParametersUtils.java @@ -15,6 +15,7 @@ */ package com.netflix.conductor.core.execution; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -87,6 +88,11 @@ public Map replace(Map input, Object json) { return replace(input, io, null); } + public Object replace(String paramString){ + DocumentContext io = JsonPath.parse(Collections.emptyMap(), option); + return replaceVariables(paramString, io, null); + } + @SuppressWarnings("unchecked") private Map replace(Map input, DocumentContext io, String taskId) { for (Entry e : input.entrySet()) { diff --git a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java index d10c211c08..cd2d3bddf3 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java @@ -49,6 +49,7 @@ import com.netflix.conductor.core.WorkflowContext; import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.execution.ApplicationException.Code; +import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome; import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask; import com.netflix.conductor.core.utils.IDGenerator; import com.netflix.conductor.dao.ExecutionDAO; @@ -82,14 +83,18 @@ public WorkflowExecutor(MetadataDAO metadata, ExecutionDAO edao, QueueDAO queue, this.edao = edao; this.queue = queue; this.config = config; - this.decider = new DeciderService(metadata, edao, om); + this.decider = new DeciderService(metadata, om); } public String startWorkflow(String name, int version, String correlationId, Map input) throws Exception { - return startWorkflow(name, version, input, correlationId, null, null); + return startWorkflow(name, version, correlationId, input, null); } - public String startWorkflow(String name, int version, Map input, String correlationId, String parentWorkflowId, String parentWorkflowTaskId) throws Exception { + public String startWorkflow(String name, int version, String correlationId, Map input, String event) throws Exception { + return startWorkflow(name, version, input, correlationId, null, null, event); + } + + public String startWorkflow(String name, int version, Map input, String correlationId, String parentWorkflowId, String parentWorkflowTaskId, String event) throws Exception { try { @@ -124,9 +129,9 @@ public String startWorkflow(String name, int 
version, Map input, wf.setCreateTime(System.currentTimeMillis()); wf.setUpdatedBy(null); wf.setUpdateTime(null); + wf.setEvent(event); edao.createWorkflow(wf); - queue.push(deciderQueue, wf.getWorkflowId(), config.getSweepFrequency()); //Let's check on this workflow in some time (sweep frequency) - decider.decide(workflowId, this); + decide(workflowId); return workflowId; }catch (Exception e) { @@ -204,8 +209,7 @@ public String rerun(RerunWorkflowRequest request) throws Exception { } edao.createWorkflow(wf); - queue.push(deciderQueue, wf.getWorkflowId(), config.getSweepFrequency()); //Let's check on this workflow in some time (sweep frequency) - decider.decide(workflowId, this); + decide(workflowId); return workflowId; } @@ -219,11 +223,12 @@ public void rewind(String workflowId) throws Exception { workflow.getTasks().forEach(t -> edao.removeTask(t.getTaskId())); workflow.getTasks().clear(); workflow.setReasonForIncompletion(null); + workflow.setStartTime(System.currentTimeMillis()); + workflow.setEndTime(0); // Change the status to running workflow.setStatus(WorkflowStatus.RUNNING); edao.updateWorkflow(workflow); - queue.push(deciderQueue, workflow.getWorkflowId(), config.getSweepFrequency()); //Let's check on this workflow in some time (sweep frequency) - decider.decide(workflowId, this); + decide(workflowId); } public void retry(String workflowId) throws Exception { @@ -262,7 +267,7 @@ public void retry(String workflowId) throws Exception { workflow.setStatus(WorkflowStatus.RUNNING); edao.updateWorkflow(workflow); - decider.decide(workflowId, this); + decide(workflowId); } @@ -311,9 +316,9 @@ public void completeWorkflow(Workflow wf) throws Exception { // care of this again! if (workflow.getParentWorkflowId() != null) { Workflow parent = edao.getWorkflow(workflow.getParentWorkflowId(), false); - decider.decide(parent.getWorkflowId(), this); + decide(parent.getWorkflowId()); } - + Monitors.recordWorkflowCompletion(workflow.getWorkflowType(), workflow.getEndTime() - workflow.getStartTime()); queue.remove(deciderQueue, workflow.getWorkflowId()); //remove from the sweep queue } @@ -353,7 +358,7 @@ public void terminateWorkflow(Workflow workflow, String reason, String failureWo // care of this again! 
if (workflow.getParentWorkflowId() != null) { Workflow parent = edao.getWorkflow(workflow.getParentWorkflowId(), false); - decider.decide(parent.getWorkflowId(), this); + decide(parent.getWorkflowId()); } if (!StringUtils.isBlank(failureWorkflow)) { @@ -364,7 +369,7 @@ public void terminateWorkflow(Workflow workflow, String reason, String failureWo input.put("failureStatus", workflow.getStatus().toString()); try { - startWorkflow(failureWorkflow, 1, input, workflowId, null, null); + startWorkflow(failureWorkflow, 1, input, workflowId, null, null, null); } catch (Exception e) { logger.error("Failed to start error workflow", e); Monitors.recordWorkflowStartError(failureWorkflow); @@ -377,39 +382,6 @@ public void terminateWorkflow(Workflow workflow, String reason, String failureWo Monitors.recordWorkflowTermination(workflow.getWorkflowType(), workflow.getStatus()); } - public int scheduleTask(List tasks) throws Exception { - if (tasks == null || tasks.isEmpty()) { - return -1; - } - String workflowId = tasks.get(0).getWorkflowInstanceId(); - Workflow workflow = edao.getWorkflow(workflowId); - int count = workflow.getTasks().size(); - - for (Task task : tasks) { - task.setSeq(++count); - } - - List created = edao.createTasks(tasks); - List createdSystemTasks = created.stream().filter(task -> SystemTaskType.is(task.getTaskType())).collect(Collectors.toList()); - createdSystemTasks.parallelStream().forEach(task -> { - - WorkflowSystemTask stt = WorkflowSystemTask.get(task.getTaskType()); - if(stt == null) { - throw new RuntimeException("No system task found by name " + task.getTaskType()); - } - task.setStartTime(System.currentTimeMillis()); - try { - stt.start(workflow, task, this); - } catch (Exception e) { - throw new RuntimeException(e); - } - edao.updateTask(task); - - }); - - return addTaskToQueue(created); - } - public void updateTask(TaskResult result) throws Exception { if (result == null) { logger.info("null task given for update..." + result); @@ -483,7 +455,8 @@ public void updateTask(TaskResult result) throws Exception { default: break; } - decider.decide(workflowId, this); + + decide(workflowId); if (task.getStatus().isTerminal()) { long duration = getTaskDuration(0, task); @@ -494,15 +467,6 @@ public void updateTask(TaskResult result) throws Exception { } - private long getTaskDuration(long s, Task task) { - long duration = task.getEndTime() - task.getStartTime(); - s += duration; - if (task.getRetriedTaskId() == null) { - return s; - } - return s + getTaskDuration(s, edao.getTask(task.getRetriedTaskId())); - } - public List getTasks(String taskType, String startKey, int count) throws Exception { return edao.getTasks(taskType, startKey, count); } @@ -522,30 +486,52 @@ public List getRunningWorkflowIds(String workflowName) throws Exception return edao.getRunningWorkflowIds(workflowName); } - - public int addTaskToQueue(final List tasks) throws Exception { - int count = 0; - for (Task t : tasks) { - if (!(t instanceof SystemTask)) { - addTaskToQueue(t); - count++; + /** + * + * @param workflowId ID of the workflow to evaluate the state for + * @return true if the workflow has completed (success or failed), false otherwise. + * @throws Exception If there was an error - caller should retry in this case. 
+ */ + public boolean decide(String workflowId) throws Exception { + + Workflow workflow = edao.getWorkflow(workflowId, true); + WorkflowDef def = metadata.get(workflow.getWorkflowType(), workflow.getVersion()); + try { + DeciderOutcome outcome = decider.decide(workflow, def); + if(outcome.isComplete) { + completeWorkflow(workflow); + return true; } - } - return count; - } + + List tasksToBeScheduled = outcome.tasksToBeScheduled; + List tasksToBeUpdated = outcome.tasksToBeUpdated; + boolean stateChanged = false; + + workflow.getTasks().addAll(tasksToBeScheduled); + for(Task task : tasksToBeScheduled) { + if (SystemTaskType.is(task.getTaskType()) && !task.getStatus().isTerminal()) { + WorkflowSystemTask stt = WorkflowSystemTask.get(task.getTaskType()); + if (stt.execute(workflow, task, this)) { + tasksToBeUpdated.add(task); + stateChanged = true; + } + } + } + stateChanged = scheduleTask(tasksToBeScheduled) || stateChanged; - public void addTaskToQueue(Task task) throws Exception { - // put in queue - queue.remove(task.getTaskType(), task.getTaskId()); - if (task.getCallbackAfterSeconds() > 0) { - queue.push(task.getTaskType(), task.getTaskId(), task.getCallbackAfterSeconds()); - } else { - queue.push(task.getTaskType(), task.getTaskId(), 0); + edao.updateTasks(tasksToBeUpdated); + if(stateChanged) { + edao.updateWorkflow(workflow); + queue.push(deciderQueue, workflow.getWorkflowId(), config.getSweepFrequency()); + decide(workflowId); + } + + } catch (TerminateWorkflow tw) { + logger.debug(tw.getMessage(), tw); + terminate(def, workflow, tw); + return true; } - } - - public void decide(String workflowId) throws Exception { - decider.decide(workflowId, this); + return false; } public void pauseWorkflow(String workflowId) throws Exception { @@ -554,7 +540,7 @@ public void pauseWorkflow(String workflowId) throws Exception { if(workflow.getStatus().isTerminal()){ throw new ApplicationException(Code.CONFLICT, "Workflow id " + workflowId + " has ended, status cannot be updated."); } - if(workflow.getStatus().equals(status)){ + if (workflow.getStatus().equals(status)) { return; //Already paused! 
} workflow.setStatus(status); @@ -568,7 +554,7 @@ public void resumeWorkflow(String workflowId) throws Exception{ } workflow.setStatus(WorkflowStatus.RUNNING); edao.updateWorkflow(workflow); - decider.decide(workflowId, this); + decide(workflowId); } public void skipTaskFromWorkflow(String workflowId, String taskReferenceName, SkipTaskRequest skipTaskRequest) throws Exception { @@ -607,16 +593,94 @@ public void skipTaskFromWorkflow(String workflowId, String taskReferenceName, Sk theTask.setOutputData(skipTaskRequest.getTaskOutput()); } edao.createTasks(Arrays.asList(theTask)); - decider.decide(workflowId, this); + decide(workflowId); } - void cleanupFromPending(Workflow workflow) { - edao.removeFromPendingWorkflow(workflow.getWorkflowType(), workflow.getWorkflowId()); - queue.remove(deciderQueue, workflow.getWorkflowId()); - } - public Workflow getWorkflow(String workflowId, boolean includeTasks) { return edao.getWorkflow(workflowId, includeTasks); } + + public void addTaskToQueue(Task task) throws Exception { + // put in queue + queue.remove(task.getTaskType(), task.getTaskId()); + if (task.getCallbackAfterSeconds() > 0) { + queue.push(task.getTaskType(), task.getTaskId(), task.getCallbackAfterSeconds()); + } else { + queue.push(task.getTaskType(), task.getTaskId(), 0); + } + } + + private long getTaskDuration(long s, Task task) { + long duration = task.getEndTime() - task.getStartTime(); + s += duration; + if (task.getRetriedTaskId() == null) { + return s; + } + return s + getTaskDuration(s, edao.getTask(task.getRetriedTaskId())); + } + + private boolean scheduleTask(List tasks) throws Exception { + + if (tasks == null || tasks.isEmpty()) { + return false; + } + + String workflowId = tasks.get(0).getWorkflowInstanceId(); + Workflow workflow = edao.getWorkflow(workflowId); + int count = workflow.getTasks().size(); + + for (Task task : tasks) { + task.setSeq(++count); + } + + List created = edao.createTasks(tasks); + List createdSystemTasks = created.stream().filter(task -> SystemTaskType.is(task.getTaskType())).collect(Collectors.toList()); + boolean startedSystemTasks = false; + for(Task task : createdSystemTasks) { + + WorkflowSystemTask stt = WorkflowSystemTask.get(task.getTaskType()); + if(stt == null) { + throw new RuntimeException("No system task found by name " + task.getTaskType()); + } + task.setStartTime(System.currentTimeMillis()); + stt.start(workflow, task, this); + edao.updateTask(task); + startedSystemTasks = true; + } + + return addTaskToQueue(created) || startedSystemTasks; + } + + private boolean addTaskToQueue(final List tasks) throws Exception { + boolean stateChanged = false; + for (Task t : tasks) { + if (!(t instanceof SystemTask)) { + addTaskToQueue(t); + stateChanged = true; + } + } + return stateChanged; + } + + private void terminate(final WorkflowDef def, final Workflow workflow, TerminateWorkflow tw) throws Exception { + + if (!workflow.getStatus().isTerminal()) { + workflow.setStatus(tw.workflowStatus); + } + + String failureWorkflow = def.getFailureWorkflow(); + if (failureWorkflow != null) { + if (failureWorkflow.startsWith("$")) { + String[] paramPathComponents = failureWorkflow.split("\\."); + String name = paramPathComponents[2]; // name of the input parameter + failureWorkflow = (String) workflow.getInput().get(name); + } + } + if(tw.task != null){ + edao.updateTask(tw.task); + } + terminateWorkflow(workflow, tw.getMessage(), failureWorkflow); + } + } diff --git a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowSweeper.java 
b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowSweeper.java index aa72d98ede..6faf65cb55 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowSweeper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowSweeper.java @@ -32,12 +32,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.core.WorkflowContext; import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.execution.ApplicationException.Code; -import com.netflix.conductor.dao.ExecutionDAO; -import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.dao.QueueDAO; import com.netflix.conductor.metrics.Monitors; @@ -53,10 +50,6 @@ public class WorkflowSweeper { private ExecutorService es; - protected ObjectMapper om; - - private DeciderService ds; - private Configuration config; private QueueDAO queues; @@ -66,31 +59,30 @@ public class WorkflowSweeper { private static final String className = WorkflowSweeper.class.getSimpleName(); @Inject - public WorkflowSweeper(ExecutionDAO edao, MetadataDAO metadata, ObjectMapper om, WorkflowExecutor workflowProvider, Configuration config, QueueDAO queues) { - this.om = om; - this.ds = new DeciderService(metadata, edao, om); + public WorkflowSweeper(WorkflowExecutor executor, Configuration config, QueueDAO queues) { this.config = config; this.queues = queues; this.executorThreadPoolSize = config.getIntProperty("workflow.sweeper.thread.count", 5); this.es = Executors.newFixedThreadPool(executorThreadPoolSize); - init(workflowProvider); + init(executor); logger.info("Workflow Sweeper Initialized"); } - public void init(WorkflowExecutor workflowProvider) { + public void init(WorkflowExecutor executor) { ScheduledExecutorService deciderPool = Executors.newScheduledThreadPool(1); deciderPool.scheduleWithFixedDelay(() -> { try{ - - if(config.disableSweep()){ + boolean disable = config.disableSweep(); + logger.debug("Workflow Sweep disabled? 
{}", disable); + if (disable) { logger.info("Workflow sweep is disabled."); return; } List workflowIds = queues.pop(WorkflowExecutor.deciderQueue, 2 * executorThreadPoolSize, 2000); - sweep(workflowIds, workflowProvider); + sweep(workflowIds, executor); }catch(Exception e){ Monitors.error(className, "sweep"); @@ -102,7 +94,7 @@ public void init(WorkflowExecutor workflowProvider) { }, 500, 500, TimeUnit.MILLISECONDS); } - public void sweep(List workflowIds, WorkflowExecutor workflowProvider) throws Exception { + public void sweep(List workflowIds, WorkflowExecutor executor) throws Exception { List> futures = new LinkedList<>(); for (String workflowId : workflowIds) { @@ -114,7 +106,7 @@ public void sweep(List workflowIds, WorkflowExecutor workflowProvider) t if(logger.isDebugEnabled()) { logger.debug("Running sweeper for workflow {}", workflowId); } - boolean done = ds.decide(workflowId, workflowProvider); + boolean done = executor.decide(workflowId); if(!done) { queues.setUnackTimeout(WorkflowExecutor.deciderQueue, workflowId, config.getSweepFrequency() * 1000); } else { @@ -123,7 +115,7 @@ public void sweep(List workflowIds, WorkflowExecutor workflowProvider) t } catch (ApplicationException e) { if(e.getCode().equals(Code.NOT_FOUND)) { - logger.error("Workflow NOT found id=" + workflowId, e); + logger.error("Workflow NOT found for id: " + workflowId, e); queues.remove(WorkflowExecutor.deciderQueue, workflowId); } diff --git a/core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java b/core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java index fc4ec2b5db..dc31a6c387 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java @@ -26,12 +26,14 @@ import org.slf4j.LoggerFactory; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.annotations.VisibleForTesting; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.events.EventQueues; import com.netflix.conductor.core.events.queue.Message; import com.netflix.conductor.core.events.queue.ObservableQueue; +import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.WorkflowExecutor; /** @@ -44,6 +46,8 @@ public class Event extends WorkflowSystemTask { private ObjectMapper om = new ObjectMapper(); + private ParametersUtils pu = new ParametersUtils(); + private enum Sink { conductor, sqs } @@ -93,36 +97,37 @@ public void cancel(Workflow workflow, Task task, WorkflowExecutor provider) thro getQueue(workflow, task).ack(Arrays.asList(message)); } - private ObservableQueue getQueue(Workflow workflow, Task task) { - String sinkValue = "" + task.getInputData().get("sink"); - int indx = sinkValue.indexOf(':'); - if(indx != -1) { - sinkValue = sinkValue.substring(0, indx); - } + @VisibleForTesting + ObservableQueue getQueue(Workflow workflow, Task task) { + + String sinkValueRaw = "" + task.getInputData().get("sink"); + Map input = new HashMap<>(); + input.put("sink", sinkValueRaw); + Map replaced = pu.getTaskInputV2(input, workflow, task.getTaskId(), null); + String sinkValue = (String)replaced.get("sink"); + + String queueName = null; Sink sink = null; - try { - sink = Sink.valueOf(sinkValue); - }catch(Exception e) { - logger.error(e.getMessage(), e); - } - if(sink == null) { + + 
if("conductor".equals(sinkValue)) { + sink = Sink.conductor; + queueName = workflow.getWorkflowType() + ":" + task.getReferenceTaskName(); + + } else if(sinkValue.startsWith("sqs:")) { + sink = Sink.sqs; + queueName = sinkValue.substring(4); + + } else { task.setStatus(Status.FAILED); - task.setReasonForIncompletion("Invalid sink specified: " + sinkValue); + task.setReasonForIncompletion("Invalid / Unsupported sink specified: " + sinkValue); return null; } - String event = null; - if(sink == Sink.conductor) { - String cq = workflow.getWorkflowType() + ":" + task.getReferenceTaskName(); - event = "conductor:" + cq; - } else if(sink == Sink.sqs ) { - event = ""+task.getInputData().get("sink"); - } - - task.getOutputData().put("event_produced", event); + String eventProduced = sink.name() + ":" + queueName; + task.getOutputData().put("event_produced", eventProduced); try { - return EventQueues.getQueue(event, true); + return EventQueues.getQueue(eventProduced, true); }catch(Exception e) { logger.error(e.getMessage(), e); task.setStatus(Status.FAILED); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/tasks/Join.java b/core/src/main/java/com/netflix/conductor/core/execution/tasks/Join.java index 9c1d087b8f..d32db26073 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/tasks/Join.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/tasks/Join.java @@ -41,7 +41,7 @@ public boolean execute(Workflow workflow, Task task, WorkflowExecutor provider) boolean allDone = true; boolean hasFailures = false; - String failureReason = null; + String failureReason = ""; List joinOn = (List) task.getInputData().get("joinOn"); for(String joinOnRef : joinOn){ Task forkedTask = workflow.getTaskByRefName(joinOnRef); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflow.java b/core/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflow.java index 6d3e6e0bd1..47dc453769 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflow.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflow.java @@ -57,7 +57,7 @@ public void start(Workflow workflow, Task task, WorkflowExecutor provider) throw try { - String subWorkflowId = provider.startWorkflow(name, version, wfInput, correlationId, workflow.getWorkflowId(), task.getTaskId()); + String subWorkflowId = provider.startWorkflow(name, version, wfInput, correlationId, workflow.getWorkflowId(), task.getTaskId(), null); task.getInputData().put("subWorkflowId", subWorkflowId); task.getOutputData().put("subWorkflowId", subWorkflowId); task.setStatus(Status.IN_PROGRESS); diff --git a/core/src/main/java/com/netflix/conductor/dao/IndexDAO.java b/core/src/main/java/com/netflix/conductor/dao/IndexDAO.java index b531010f32..53436f37a2 100644 --- a/core/src/main/java/com/netflix/conductor/dao/IndexDAO.java +++ b/core/src/main/java/com/netflix/conductor/dao/IndexDAO.java @@ -87,6 +87,5 @@ public interface IndexDAO { * @param msg Message */ public void addMessage(String queue, Message msg); - } \ No newline at end of file diff --git a/core/src/main/java/com/netflix/conductor/metrics/Monitors.java b/core/src/main/java/com/netflix/conductor/metrics/Monitors.java index 316325baf7..53b5fbce85 100644 --- a/core/src/main/java/com/netflix/conductor/metrics/Monitors.java +++ b/core/src/main/java/com/netflix/conductor/metrics/Monitors.java @@ -195,4 +195,8 @@ public static void recordUpdateConflict(String taskType, String workflowType, Wo public 
static void recordUpdateConflict(String taskType, String workflowType, Status status) { counter(classQualifier, "task_update_conflict", "workflowName", workflowType, "taskType", taskType, "workflowStatus", status.name()); } + + public static void recordWorkflowCompletion(String workflowType, long duration) { + getTimer(classQualifier, "workflow_execution", "workflowName", workflowType).record(duration, TimeUnit.MILLISECONDS); + } } \ No newline at end of file diff --git a/core/src/main/java/com/netflix/conductor/service/ExecutionService.java b/core/src/main/java/com/netflix/conductor/service/ExecutionService.java index c21f0990aa..2b63deee73 100644 --- a/core/src/main/java/com/netflix/conductor/service/ExecutionService.java +++ b/core/src/main/java/com/netflix/conductor/service/ExecutionService.java @@ -311,10 +311,6 @@ public boolean addEventExecution(EventExecution ee) { public void updateEventExecution(EventExecution ee) { edao.updateEventExecution(ee); } - - public List getEventExecutions(String eventHandlerName, String eventName, String messageId, int max) { - return edao.getEventExecutions(eventHandlerName, eventName, messageId, max); - } public void addMessage(String name, Message msg) { edao.addMessage(name, msg); diff --git a/core/src/main/java/com/netflix/conductor/service/MetadataService.java b/core/src/main/java/com/netflix/conductor/service/MetadataService.java index d8bc33aced..239c2075e4 100644 --- a/core/src/main/java/com/netflix/conductor/service/MetadataService.java +++ b/core/src/main/java/com/netflix/conductor/service/MetadataService.java @@ -35,7 +35,8 @@ import com.netflix.conductor.dao.MetadataDAO; /** - * @author Viren Workflow Manager + * @author Viren + * */ @Singleton @Trace @@ -48,7 +49,11 @@ public MetadataService(MetadataDAO metadata) { this.metadata = metadata; } - public void registerTaskDef(List taskDefs) throws Exception { + /** + * + * @param taskDefs Task Definitions to register + */ + public void registerTaskDef(List taskDefs) { for (TaskDef taskDef : taskDefs) { taskDef.setCreatedBy(WorkflowContext.get().getClientApp()); taskDef.setCreateTime(System.currentTimeMillis()); @@ -58,7 +63,11 @@ public void updateTaskDef(TaskDef taskDef) throws Exception { + /** + * + * @param taskDef Task Definition to be updated + */ + public void updateTaskDef(TaskDef taskDef) { TaskDef existing = metadata.getTaskDef(taskDef.getName()); if (existing == null) { throw new ApplicationException(Code.NOT_FOUND, "No such task by name " + taskDef.getName()); @@ -68,43 +77,82 @@ metadata.updateTaskDef(taskDef); } + /** + * + * @param taskType Type of the task definition to be removed + */ public void unregisterTaskDef(String taskType) { metadata.removeTaskDef(taskType); } - public List getTaskDefs() throws Exception { + /** + * + * @return List of all the registered tasks + */ + public List getTaskDefs() { return metadata.getAllTaskDefs(); } - public TaskDef getTaskDef(String taskType) throws Exception { + /** + * + * @param taskType Task to retrieve + * @return Task Definition + */ + public TaskDef getTaskDef(String taskType) { return metadata.getTaskDef(taskType); } - public void updateWorkflowDef(WorkflowDef def) throws Exception { + /** + * + * @param def Workflow definition to be updated + */ + public void updateWorkflowDef(WorkflowDef def) { metadata.update(def); } - public void updateWorkflowDef(List wfs) throws Exception { + /** + * + * @param wfs Workflow
definitions to be updated. + */ + public void updateWorkflowDef(List wfs) { for (WorkflowDef wf : wfs) { metadata.update(wf); } } - public WorkflowDef getWorkflowDef(String name, Integer version) throws Exception { + /** + * + * @param name Name of the workflow to retrieve + * @param version Optional. Version. If null, then retrieves the latest + * @return Workflow definition + */ + public WorkflowDef getWorkflowDef(String name, Integer version) { if (version == null) { return metadata.getLatest(name); } return metadata.get(name, version); } + + /** + * + * @param name Name of the workflow to retrieve + * @return Latest version of the workflow definition + */ + public WorkflowDef getLatestWorkflow(String name) { return metadata.getLatest(name); } - public List getWorkflowDefs() throws Exception { + public List getWorkflowDefs() { return metadata.getAll(); } - public void registerWorkflowDef(WorkflowDef def) throws Exception { + public void registerWorkflowDef(WorkflowDef def) { if(def.getName().contains(":")) { throw new ApplicationException(Code.INVALID_INPUT, "Workflow name cannot contain the following set of characters: ':'"); } + if(def.getSchemaVersion() < 1 || def.getSchemaVersion() > 2) { + def.setSchemaVersion(2); + } metadata.create(def); } @@ -160,5 +208,4 @@ private void validateEvent(EventHandler eh) { String event = eh.getEvent(); EventQueues.getQueue(event, true); } - } diff --git a/core/src/test/java/com/netflix/conductor/core/events/MockObservableQueue.java b/core/src/test/java/com/netflix/conductor/core/events/MockObservableQueue.java index bf125e23e1..377c3ff689 100644 --- a/core/src/test/java/com/netflix/conductor/core/events/MockObservableQueue.java +++ b/core/src/test/java/com/netflix/conductor/core/events/MockObservableQueue.java @@ -18,7 +18,13 @@ */ package com.netflix.conductor.core.events; +import java.util.Comparator; +import java.util.HashSet; +import java.util.LinkedList; import java.util.List; +import java.util.Set; +import java.util.TreeSet; +import java.util.stream.Collectors; import com.netflix.conductor.core.events.queue.Message; import com.netflix.conductor.core.events.queue.ObservableQueue; @@ -31,45 +37,59 @@ */ public class MockObservableQueue implements ObservableQueue { - public MockObservableQueue() { - + private String uri; + + private String name; + + private String type; + + private Set messages = new TreeSet<>(Comparator.comparing(Message::getId)); + + public MockObservableQueue(String uri, String name, String type) { + this.uri = uri; + this.name = name; + this.type = type; } @Override public Observable observe() { - return null; + return Observable.from(messages); } public String getType() { - return null; + return type; } @Override public String getName() { - // TODO Auto-generated method stub - return null; + return name; } @Override public String getURI() { - // TODO Auto-generated method stub - return null; + return uri; } @Override public List ack(List messages) { - return null; + this.messages.removeAll(messages); // remove acked messages from the internal queue, not from the argument list + return messages.stream().map(Message::getId).collect(Collectors.toList()); } @Override public void publish(List messages) { - // TODO Auto-generated method stub - + this.messages.addAll(messages); } @Override public long size() { - return 0; + return messages.size(); } + @Override + public String toString() { + return "MockObservableQueue [uri=" + uri + ", name=" + name + ", type=" + type + "]"; + } + + } diff --git a/core/src/test/java/com/netflix/conductor/core/events/MockQueueProvider.java
b/core/src/test/java/com/netflix/conductor/core/events/MockQueueProvider.java new file mode 100644 index 0000000000..da21b285e9 --- /dev/null +++ b/core/src/test/java/com/netflix/conductor/core/events/MockQueueProvider.java @@ -0,0 +1,43 @@ +/** + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * + */ +package com.netflix.conductor.core.events; + +import com.netflix.conductor.core.events.EventQueues.QueueType; +import com.netflix.conductor.core.events.queue.ObservableQueue; + +/** + * @author Viren + * + */ +public class MockQueueProvider implements EventQueueProvider { + + private QueueType type; + + public MockQueueProvider(QueueType type) { + this.type = type; + EventQueues.registerProvider(type, this); + } + + + @Override + public ObservableQueue getQueue(String queueURI) { + return new MockObservableQueue(queueURI, queueURI, type.name()); + } + +} diff --git a/core/src/test/java/com/netflix/conductor/core/events/TestEventProcessor.java b/core/src/test/java/com/netflix/conductor/core/events/TestEventProcessor.java index d181502013..3ccc1c92ba 100644 --- a/core/src/test/java/com/netflix/conductor/core/events/TestEventProcessor.java +++ b/core/src/test/java/com/netflix/conductor/core/events/TestEventProcessor.java @@ -110,7 +110,7 @@ public String answer(InvocationOnMock invocation) throws Throwable { started.set(true); return id; } - }).when(executor).startWorkflow(action.getStart_workflow().getName(), 1, action.getStart_workflow().getCorrelationId(), action.getStart_workflow().getInput()); + }).when(executor).startWorkflow(action.getStart_workflow().getName(), 1, action.getStart_workflow().getCorrelationId(), action.getStart_workflow().getInput(), event); //Metadata Service Mock MetadataService metadata = mock(MetadataService.class); diff --git a/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderOutcomes.java b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderOutcomes.java new file mode 100644 index 0000000000..f6745bd5aa --- /dev/null +++ b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderOutcomes.java @@ -0,0 +1,134 @@ +/** + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/** + * + */ +package com.netflix.conductor.core.execution; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.InputStream; + +import org.junit.Before; +import org.junit.Test; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.conductor.common.metadata.tasks.Task.Status; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome; +import com.netflix.conductor.dao.MetadataDAO; + +/** + * @author Viren + * + */ +public class TestDeciderOutcomes { + + private DeciderService ds; + + private ObjectMapper om = new ObjectMapper(); + + @Before + public void init() throws Exception { + + MetadataDAO metadata = mock(MetadataDAO.class); + TaskDef td = new TaskDef(); + when(metadata.getTaskDef(any())).thenReturn(td); + this.ds = new DeciderService(metadata, om); + } + + @Test + public void testWorkflowWithNoTasks() throws Exception { + InputStream stream = TestDeciderOutcomes.class.getResourceAsStream("/conditional_flow.json"); + WorkflowDef def = om.readValue(stream, WorkflowDef.class); + assertNotNull(def); + + Workflow workflow = new Workflow(); + workflow.setWorkflowType(def.getName()); + workflow.setStartTime(0); + workflow.getInput().put("param1", "nested"); + workflow.getInput().put("param2", "one"); + + DeciderOutcome outcome = ds.decide(workflow, def); + assertNotNull(outcome); + assertFalse(outcome.isComplete); + assertTrue(outcome.tasksToBeUpdated.isEmpty()); + assertEquals(3, outcome.tasksToBeScheduled.size()); + System.out.println(outcome.tasksToBeScheduled); + + outcome.tasksToBeScheduled.forEach(t -> t.setStatus(Status.COMPLETED)); + workflow.getTasks().addAll(outcome.tasksToBeScheduled); + outcome = ds.decide(workflow, def); + assertFalse(outcome.isComplete); + assertEquals(outcome.tasksToBeUpdated.toString(), 3, outcome.tasksToBeUpdated.size()); + assertEquals(1, outcome.tasksToBeScheduled.size()); + assertEquals("junit_task_3", outcome.tasksToBeScheduled.get(0).getTaskDefName()); + System.out.println(outcome.tasksToBeScheduled); + } + + + @Test + public void testRetries() { + WorkflowDef def = new WorkflowDef(); + def.setName("test"); + + WorkflowTask task = new WorkflowTask(); + task.setName("test_task"); + task.setType("USER_TASK"); + task.setTaskReferenceName("t0"); + task.getInputParameters().put("taskId", "${CPEWF_TASK_ID}"); + + def.getTasks().add(task); + def.setSchemaVersion(2); + + + Workflow workflow = new Workflow(); + workflow.setStartTime(System.currentTimeMillis()); + DeciderOutcome outcome = ds.decide(workflow, def); + assertNotNull(outcome); + + System.out.println(outcome.tasksToBeScheduled); + assertEquals(1, outcome.tasksToBeScheduled.size()); + assertEquals(task.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); + System.out.println(outcome.tasksToBeScheduled.get(0).getInputData()); + String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId(); + assertEquals(task1Id, 
outcome.tasksToBeScheduled.get(0).getInputData().get("taskId")); + + outcome.tasksToBeScheduled.get(0).setStatus(Status.FAILED); + workflow.getTasks().addAll(outcome.tasksToBeScheduled); + + outcome = ds.decide(workflow, def); + assertNotNull(outcome); + System.out.println(outcome.tasksToBeScheduled); + System.out.println(outcome.tasksToBeUpdated); + + assertEquals(1, outcome.tasksToBeUpdated.size()); + assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId()); + assertNotSame(task1Id, outcome.tasksToBeScheduled.get(0).getTaskId()); + assertEquals(outcome.tasksToBeScheduled.get(0).getTaskId(), outcome.tasksToBeScheduled.get(0).getInputData().get("taskId")); + } + +} diff --git a/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java index 337d14a40c..79e74cea4e 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java @@ -47,6 +47,7 @@ import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.common.run.Workflow.WorkflowStatus; +import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome; import com.netflix.conductor.dao.MetadataDAO; @@ -62,7 +63,11 @@ public class TestDeciderService { @Before public void setup(){ - ds = new DeciderService(); + MetadataDAO mdao = mock(MetadataDAO.class); + TaskDef taskDef = new TaskDef(); + when(mdao.getTaskDef(any())).thenReturn(taskDef); + + ds = new DeciderService(mdao, new ObjectMapper()); workflow = new Workflow(); workflow.getInput().put("requestId", "request id 001"); @@ -92,11 +97,7 @@ public void setup(){ workflow.getTasks().add(task); workflow.getTasks().add(task2); - - MetadataDAO mdao = mock(MetadataDAO.class); - TaskDef taskDef = new TaskDef(); - when(mdao.getTaskDef(any())).thenReturn(taskDef); - ds.setMetadata(mdao); + } @@ -435,8 +436,8 @@ public void testCaseStatement() throws Exception { wf.setVersion(def.getVersion()); wf.setStatus(WorkflowStatus.RUNNING); - - List scheduledTasks = ds.startWorkflow(wf, def); + DeciderOutcome outcome = ds.decide(wf, def); + List scheduledTasks = outcome.tasksToBeScheduled; assertNotNull(scheduledTasks); assertEquals(2, scheduledTasks.size()); assertEquals(Status.IN_PROGRESS, scheduledTasks.get(0).getStatus()); diff --git a/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestEvent.java b/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestEvent.java index 8c58245925..b2f7402903 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestEvent.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestEvent.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; @@ -29,15 +30,21 @@ import java.util.LinkedList; import java.util.List; +import org.junit.Before; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import 
com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.core.events.EventQueues.QueueType; +import com.netflix.conductor.core.events.MockQueueProvider; import com.netflix.conductor.core.events.queue.Message; +import com.netflix.conductor.core.events.queue.ObservableQueue; import com.netflix.conductor.core.events.queue.dyno.DynoEventQueueProvider; +import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.TestConfiguration; import com.netflix.conductor.dao.QueueDAO; @@ -47,6 +54,96 @@ */ public class TestEvent { + @Before + public void setup() { + new MockQueueProvider(QueueType.sqs); + new MockQueueProvider(QueueType.conductor); + } + + @Test + public void testEvent() { + System.setProperty("QUEUE_NAME", "queue_name_001"); + ParametersUtils pu = new ParametersUtils(); + String eventt = "queue_${QUEUE_NAME}"; + String event = pu.replace(eventt).toString(); + assertNotNull(event); + assertEquals("queue_queue_name_001", event); + + eventt = "queue_9"; + event = pu.replace(eventt).toString(); + assertNotNull(event); + assertEquals(eventt, event); + } + + @Test + public void testSinkParam() { + String sink = "sqs:queue_name"; + + Workflow workflow = new Workflow(); + workflow.setWorkflowType("wf0"); + + Task task1 = new Task(); + task1.setReferenceTaskName("t1"); + task1.getOutputData().put("q", "t1_queue"); + workflow.getTasks().add(task1); + + Task task2 = new Task(); + task2.setReferenceTaskName("t2"); + task2.getOutputData().put("q", "task2_queue"); + workflow.getTasks().add(task2); + + Task task = new Task(); + task.setReferenceTaskName("event"); + task.getInputData().put("sink", sink); + task.setTaskType(WorkflowTask.Type.EVENT.name()); + workflow.getTasks().add(task); + + Event event = new Event(); + ObservableQueue queue = event.getQueue(workflow, task); + assertNotNull(task.getReasonForIncompletion(), queue); + assertEquals("queue_name", queue.getName()); + assertEquals("sqs", queue.getType()); + + sink = "sqs:${t1.output.q}"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNotNull(queue); + assertEquals("t1_queue", queue.getName()); + assertEquals("sqs", queue.getType()); + System.out.println(task.getOutputData().get("event_produced")); + + sink = "sqs:${t2.output.q}"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNotNull(queue); + assertEquals("task2_queue", queue.getName()); + assertEquals("sqs", queue.getType()); + System.out.println(task.getOutputData().get("event_produced")); + + sink = "conductor"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNotNull(queue); + assertEquals(workflow.getWorkflowType() + ":" + task.getReferenceTaskName(), queue.getName()); + assertEquals("conductor", queue.getType()); + System.out.println(task.getOutputData().get("event_produced")); + + sink = "sqs:static_value"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNotNull(queue); + assertEquals("static_value", queue.getName()); + assertEquals("sqs", queue.getType()); + assertEquals(sink, task.getOutputData().get("event_produced")); + System.out.println(task.getOutputData().get("event_produced")); + + sink = "bad:queue"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNull(queue); + assertEquals(Task.Status.FAILED, task.getStatus()); + } + @Test public void test() throws Exception { Event event = new Event(); diff --git 
a/core/src/test/resources/conditional_flow.json b/core/src/test/resources/conditional_flow.json new file mode 100644 index 0000000000..2f057b756b --- /dev/null +++ b/core/src/test/resources/conditional_flow.json @@ -0,0 +1,108 @@ +{ + "name": "ConditionalTaskWF", + "description": "ConditionalTaskWF", + "version": 1, + "tasks": [ + { + "name": "conditional", + "taskReferenceName": "conditional", + "inputParameters": { + "case": "workflow.input.param1" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "nested": [ + { + "name": "conditional2", + "taskReferenceName": "conditional2", + "inputParameters": { + "case": "workflow.input.param2" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "one": [ + { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0 + } + ], + "two": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ] + }, + "startDelay": 0 + } + ], + "three": [ + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0 + } + ] + }, + "defaultCase": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ], + "startDelay": 0 + }, + { + "name": "finalcondition", + "taskReferenceName": "tf", + "inputParameters": { + "finalCase": "workflow.input.finalCase" + }, + "type": "DECISION", + "caseValueParam": "finalCase", + "decisionCases": { + "notify": [ + { + "name": "junit_task_4", + "taskReferenceName": "junit_task_4", + "type": "SIMPLE", + "startDelay": 0 + } + ] + }, + "startDelay": 0 + } + ], + "inputParameters": [ + "param1", + "param2" + ], + "schemaVersion": 1 +} \ No newline at end of file diff --git a/core/src/test/resources/perf.json b/core/src/test/resources/perf.json deleted file mode 100644 index 9e1645ce0f..0000000000 --- a/core/src/test/resources/perf.json +++ /dev/null @@ -1,285 +0,0 @@ -{ - "name": "unit_test_1", - "description": "unit_test_1", - "version": 1, - "tasks": [ - { - "name": "perf_task_1", - "taskReferenceName": "perf_task_1", - "inputParameters": { - "mod": "workflow.input.mod", - "oddEven": "workflow.input.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "dyntask", - "taskReferenceName": "perf_task_2", - "inputParameters": { - "taskToExecute": "workflow.input.task2Name" - }, - "type": "DYNAMIC", - "dynamicTaskNameParam": "taskToExecute" - }, - { - "name": "perf_task_3", - "taskReferenceName": "perf_task_3", - "inputParameters": { - "mod": "perf_task_2.output.mod", - "oddEven": "perf_task_2.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "oddEvenDecision", - "taskReferenceName": "oddEvenDecision", - "inputParameters": { - "oddEven": "perf_task_3.output.oddEven" - }, - "type": "DECISION", - "caseValueParam": "oddEven", - "decisionCases": { - "0": [ - { - "name": "perf_task_4", - "taskReferenceName": "perf_task_4", - "inputParameters": { - "mod": "perf_task_3.output.mod", - "oddEven": "perf_task_3.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_5", - "taskReferenceName": "perf_task_5", - "inputParameters": { - "mod": 
"perf_task_4.output.mod", - "oddEven": "perf_task_4.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_6", - "taskReferenceName": "perf_task_6", - "inputParameters": { - "mod": "perf_task_5.output.mod", - "oddEven": "perf_task_5.output.oddEven" - }, - "type": "SIMPLE" - } - ], - "1": [ - { - "name": "perf_task_7", - "taskReferenceName": "perf_task_7", - "inputParameters": { - "mod": "perf_task_3.output.mod", - "oddEven": "perf_task_3.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_8", - "taskReferenceName": "perf_task_8", - "inputParameters": { - "mod": "perf_task_7.output.mod", - "oddEven": "perf_task_7.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_9", - "taskReferenceName": "perf_task_9", - "inputParameters": { - "mod": "perf_task_8.output.mod", - "oddEven": "perf_task_8.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "modDecision", - "taskReferenceName": "modDecision", - "inputParameters": { - "mod": "perf_task_8.output.mod" - }, - "type": "DECISION", - "caseValueParam": "mod", - "decisionCases": { - "0": [ - { - "name": "perf_task_12", - "taskReferenceName": "perf_task_12", - "inputParameters": { - "mod": "perf_task_9.output.mod", - "oddEven": "perf_task_9.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_13", - "taskReferenceName": "perf_task_13", - "inputParameters": { - "mod": "perf_task_12.output.mod", - "oddEven": "perf_task_12.output.oddEven" - }, - "type": "SIMPLE" - } - ], - "1": [ - { - "name": "perf_task_15", - "taskReferenceName": "perf_task_15", - "inputParameters": { - "mod": "perf_task_9.output.mod", - "oddEven": "perf_task_9.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_16", - "taskReferenceName": "perf_task_16", - "inputParameters": { - "mod": "perf_task_15.output.mod", - "oddEven": "perf_task_15.output.oddEven" - }, - "type": "SIMPLE" - } - ], - "4": [ - { - "name": "perf_task_18", - "taskReferenceName": "perf_task_18", - "inputParameters": { - "mod": "perf_task_9.output.mod", - "oddEven": "perf_task_9.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_19", - "taskReferenceName": "perf_task_19", - "inputParameters": { - "mod": "perf_task_18.output.mod", - "oddEven": "perf_task_18.output.oddEven" - }, - "type": "SIMPLE" - } - ], - "5": [ - { - "name": "perf_task_21", - "taskReferenceName": "perf_task_21", - "inputParameters": { - "mod": "perf_task_9.output.mod", - "oddEven": "perf_task_9.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_22", - "taskReferenceName": "perf_task_22", - "inputParameters": { - "mod": "perf_task_21.output.mod", - "oddEven": "perf_task_21.output.oddEven" - }, - "type": "SIMPLE" - } - ] - }, - "defaultCase": [ - { - "name": "perf_task_24", - "taskReferenceName": "perf_task_24", - "inputParameters": { - "mod": "perf_task_9.output.mod", - "oddEven": "perf_task_9.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_25", - "taskReferenceName": "perf_task_25", - "inputParameters": { - "mod": "perf_task_24.output.mod", - "oddEven": "perf_task_24.output.oddEven" - }, - "type": "SIMPLE" - } - ] - } - ] - } - }, - { - "name": "perf_task_28", - "taskReferenceName": "perf_task_28", - "inputParameters": { - "mod": "perf_task_3.output.mod", - "oddEven": "perf_task_3.output.oddEven" - }, - "type": "SIMPLE" - }, - { - "name": "perf_task_29", - "taskReferenceName": "perf_task_29", - "inputParameters": { - "mod": "perf_task_28.output.mod", - "oddEven": "perf_task_28.output.oddEven" - }, - 
"type": "SIMPLE" - }, - { - "name": "fork", - "taskReferenceName": "fork1", - "type": "FORK_JOIN", - "forkTasks": [ - [ - { - "name": "perf_task_1", - "taskReferenceName": "pt1", - "type": "SIMPLE" - }, - { - "name": "perf_task_28", - "taskReferenceName": "pt28", - "type": "SIMPLE" - } - ], - [ - { - "name": "perf_task_3", - "taskReferenceName": "pt3", - "type": "SIMPLE" - } - ], - [ - { - "name": "perf_task_10", - "taskReferenceName": "pt10", - "type": "SIMPLE" - } - ] - ] - }, - { - "name": "join", - "taskReferenceName": "join1", - "type": "JOIN", - "joinOn": [ - "pt28", - "pt3", - "pt10" - ] - }, - { - "name": "perf_task_30", - "taskReferenceName": "perf_task_30", - "inputParameters": { - "mod": "perf_task_29.output.mod", - "oddEven": "perf_task_29.output.oddEven" - }, - "type": "SIMPLE" - } - ], - "schemaVersion": 1 -} \ No newline at end of file diff --git a/docs/docs/css/custom.css b/docs/docs/css/custom.css index 323a37857a..e41a2e9b55 100644 --- a/docs/docs/css/custom.css +++ b/docs/docs/css/custom.css @@ -14,4 +14,7 @@ code { } .wy-side-nav-search { margin-bottom: 0; +} +body { + font-family: "Arial","proxima-nova","Helvetica Neue","Arial","sans-serif"; } \ No newline at end of file diff --git a/docs/docs/events/index.md b/docs/docs/events/index.md new file mode 100644 index 0000000000..9b83552e2d --- /dev/null +++ b/docs/docs/events/index.md @@ -0,0 +1,122 @@ +## Introduction +Eventing in Conductor provides for loose coupling between workflows and support for producing and consuming events from external systems. + +This includes: + +1. Being able to produce an event (message) in an external system like SQS or internal to Conductor. +2. Start a workflow when a specific event occurs that matches the provided criteria. + +Conductor provides SUB_WORKFLOW task that can be used to embed a workflow inside parent workflow. Eventing supports provides similar capability without explicitly adding dependencies and provides **fire-and-forget** style integrations. + +## Event Task +Event task provides ability to publish an event (message) to either Conductor or an external eventing system like SQS. Event tasks are useful for creating event based dependencies for workflows and tasks. + +See [Event Task](/metadata/systask/#event) for documentation. + +## Event Handler +Event handlers are listeners registered that executes an action when a matching event occurs. The supported actions are: + +1. Start a Workflow +2. Fail a Task +3. Complete a Task + +Event Handlers can be configured to listen to Conductor Events or an external event like SQS. + +### Configuration +Event Handlers are configured via ```/event/``` APIs. + +#### Structure: +```json +{ + "name" : "descriptive unique name", + "event": "event_type:event_location", + "condition": "boolean condition", + "actions": ["see examples below"] +} +``` +#### Condition +Condition is an expression that MUST evaluate to a boolean value. A Javascript like syntax is supported that can be used to evaluate condition based on the payload. +Actions are executed only when the condition evaluates to `true`. 
+ +**Examples** + +Given the following payload in the message: + +```json +{ + "fileType": "AUDIO", + "version": 3, + "metadata": { + "length": 300, + "codec": "aac" + } +} +``` + +|Expression|Result| +|---|---| +|`$.version > 1`|true| +|`$.version > 10`|false| +|`$.metadata.length == 300`|true| + + +### Actions + +**Start A Workflow** + +```json +{ + "action": "start_workflow", + "start_workflow": { + "name": "WORKFLOW_NAME", + "version": <optional_version> + "input": { + "param1": "${param1}" + } + } +} +``` + +**Complete Task** + +```json +{ + "action": "complete_task", + "complete_task": { + "workflowId": "${source.externalId.workflowId}", + "taskRefName": "task_1", + "output": { + "response": "${source.result}" + } + }, + "expandInlineJSON": true +} +``` + +**Fail Task** + +```json +{ + "action": "fail_task", + "fail_task": { + "workflowId": "${source.externalId.workflowId}", + "taskRefName": "task_1", + "output": { + "response": "${source.result}" + } + }, + "expandInlineJSON": true +} +``` +Input for starting a workflow and output when completing / failing a task follow the same [expressions](/metadata/#wiring-inputs-and-outputs) used for wiring workflow inputs. + +!!!info "Expanding stringified JSON elements in payload" + The `expandInlineJSON` property, when set to true, expands inlined stringified JSON elements in the payload into JSON documents, replacing the string value with the parsed JSON document. + This feature allows such elements to be used with JSON path expressions. + +## Extending + +Provide the implementation of [EventQueueProvider](https://github.com/Netflix/conductor/blob/master/core/src/main/java/com/netflix/conductor/core/events/EventQueueProvider.java). + +SQS Queue Provider: +[SQSEventQueueProvider.java](https://github.com/Netflix/conductor/blob/master/contribs/src/main/java/com/netflix/conductor/core/events/sqs/SQSEventQueueProvider.java) diff --git a/docs/docs/extend/index.md b/docs/docs/extend/index.md index bf27b42f31..308bf0bd96 100644 --- a/docs/docs/extend/index.md +++ b/docs/docs/extend/index.md @@ -4,7 +4,7 @@ Conductor provides a pluggable backend. The current implementation uses Dynomit There are 4 interfaces that need to be implemented for each backend: ```java -//Store for workfow and task definitions +//Store for workflow and task definitions com.netflix.conductor.dao.MetadataDAO ``` @@ -31,4 +31,4 @@ e.g. SQS for queueing and a relational store for others. To create system tasks follow the steps below: * Extend ```com.netflix.conductor.core.execution.tasks.WorkflowSystemTask``` -* Instantiate the new classs as part of the statup (eager singleton) +* Instantiate the new class as part of the startup (eager singleton) diff --git a/docs/docs/faq.md b/docs/docs/faq.md new file mode 100644 index 0000000000..cb081e6136 --- /dev/null +++ b/docs/docs/faq.md @@ -0,0 +1,42 @@ + + +#### How do you schedule a task to be put in the queue after some time (e.g. 1 hour, 1 day, etc.)? + +After polling for the task, update its status to `IN_PROGRESS` and set the `callbackAfterSeconds` value to the desired delay. The task will remain in the queue, but will not be handed to a polling worker until the specified number of seconds has elapsed. + +If there is a timeout set for the task, and the `callbackAfterSeconds` exceeds the timeout value, it will result in the task being TIMED_OUT. + +#### How long can a workflow be in running state? Can I have a workflow that keeps running for days or months? + +Yes.
As long as the timeouts on the tasks are set to accommodate long-running workflows, the workflow will stay in the running state. + +#### My workflow fails to start with missing task error + +Ensure all the tasks are registered via `/metadata/taskdefs` APIs. Add any missing task definition (as reported in the error) and try again. + +#### Where does my worker run? How does Conductor run my tasks? + +Conductor does not run the workers. When a task is scheduled, it is put into the queue maintained by Conductor. Workers are required to poll for tasks using the `/tasks/poll` API at a periodic interval, execute the business logic for the task, and report back the results using the `POST /tasks` API call. +Conductor, however, will run [system tasks](/metadata/systask/) on the Conductor server. + +#### How can I schedule workflows to run at a specific time? + +Conductor does not provide any scheduling mechanism, but you can use any of the available scheduling systems to make REST calls to Conductor to start a workflow. Alternatively, publish a message to a supported eventing system like SQS to trigger a workflow. +More details about [eventing](/events). + +#### How do I set up a Dynomite cluster? + +Visit Dynomite's GitHub page, [https://github.com/Netflix/dynomite](https://github.com/Netflix/dynomite), to find details on setup and support mechanisms. + +#### Can I use Conductor with Ruby / Go / Python? + +Yes. Workers can be written in any language as long as they can poll and update the task results via HTTP endpoints. + +Conductor provides frameworks for Java and Python to simplify the task of polling and updating the status back to the Conductor server. + +**Note:** The Python client is currently in development and not battle-tested for production use cases. + +#### How can I get help with Dynomite? + +Visit Dynomite's GitHub page, [https://github.com/Netflix/dynomite](https://github.com/Netflix/dynomite), to find details on setup and support mechanisms. + diff --git a/docs/docs/img/kitchensink.png b/docs/docs/img/kitchensink.png index e003194101..f14981af0d 100644 Binary files a/docs/docs/img/kitchensink.png and b/docs/docs/img/kitchensink.png differ diff --git a/docs/docs/img/task_states.png b/docs/docs/img/task_states.png new file mode 100644 index 0000000000..22ebfbcadc Binary files /dev/null and b/docs/docs/img/task_states.png differ diff --git a/docs/docs/img/task_states.svg b/docs/docs/img/task_states.svg new file mode 100644 index 0000000000..3fd55079ba --- /dev/null +++ b/docs/docs/img/task_states.svg @@ -0,0 +1,4 @@ + + + + diff --git a/docs/docs/intro/concepts.md b/docs/docs/intro/concepts.md index 4581b0d079..ee0debe72c 100644 --- a/docs/docs/intro/concepts.md +++ b/docs/docs/intro/concepts.md @@ -20,13 +20,17 @@ System tasks are executed within the JVM of the Conductor server and managed by | [FORK_JOIN_DYNAMIC](/metadata/systask/#dynamic-fork) | Similar to FORK, but rather than the set of tasks defined in the blueprint for parallel execution, FORK_JOIN_DYNAMIC spawns the parallel tasks based on the input expression to this task | | [JOIN](/metadata/systask/#join) | Complements FORK and FORK_JOIN_DYNAMIC. Used to merge one or more parallel branches| | [SUB_WORKFLOW](/metadata/systask/#sub-workflow) | Nest another workflow as a sub workflow task. Upon execution it instantiates the sub workflow and awaits its completion| +| [EVENT](/metadata/systask/#event ) | Produces an event in a supported eventing system (e.g.
Conductor, SQS)| -Conductor provides an API to create user defined tasks that are excuted in the same JVM as the engine. see [WorkflowSystemTask](https://github.com/Netflix/conductor/blob/dev/core/src/main/java/com/netflix/conductor/core/execution/tasks/WorkflowSystemTask.java) interface for details. + +Conductor provides an API to create user-defined tasks that are executed in the same JVM as the engine. See the [WorkflowSystemTask](https://github.com/Netflix/conductor/blob/dev/core/src/main/java/com/netflix/conductor/core/execution/tasks/WorkflowSystemTask.java) interface for details. ## Worker Tasks -Worker tasks are implemented by application(s) and runs in a separate environment from Conductor. The worker tasks can be implemented in any langugage. These tasks talk to Conductor server via REST API endpionts to poll for tasks and update its status after execution. +Worker tasks are implemented by application(s) and run in a separate environment from Conductor. The worker tasks can be implemented in any language. These tasks talk to the Conductor server via REST API endpoints to poll for tasks and update their status after execution. Worker tasks are identified by task type __SIMPLE__ in the blueprint. +## Lifecycle of a Workflow Task +![Task_States](/img/task_states.png) [more details](/metadata/#task-definition) diff --git a/docs/docs/intro/index.md b/docs/docs/intro/index.md index 0c6320488c..c5f97e9a9a 100644 --- a/docs/docs/intro/index.md +++ b/docs/docs/intro/index.md @@ -8,7 +8,7 @@ The API and storage layers are pluggable and provide ability to work with differ !!! hint "Running in production" For a detailed configuration guide on installing and running Conductor server in production visit [Conductor Server](/server) documentation. -## Runnin In-Memory Server +## Running In-Memory Server Follow the steps below to quickly bring up a local Conductor instance backed by an in-memory database with a simple kitchen sink workflow that demonstrates all the capabilities of Conductor. diff --git a/docs/docs/metadata/index.md b/docs/docs/metadata/index.md index b01520a000..785dec2fd3 100644 --- a/docs/docs/metadata/index.md +++ b/docs/docs/metadata/index.md @@ -36,7 +36,7 @@ Conductor maintains a registry of worker task types. A task type MUST be regist **Retry Logic** -* FIXED : Reschedule the task afer the ```retryDelaySeconds``` +* FIXED : Reschedule the task after the ```retryDelaySeconds``` * EXPONENTIAL_BACKOFF : reschedule after ```retryDelaySeconds * attempNo``` **Timeout Policy** @@ -46,7 +46,7 @@ Conductor maintains a registry of worker task types. A task type MUST be regist * ALERT_ONLY : Registers a counter (task_timeout) # Workflow Definition -Workflows are define using a JSON based DSL. +Workflows are defined using a JSON based DSL. **Example** ```json @@ -85,7 +85,7 @@ Workflows are define using a JSON based DSL. |name|Name of the workflow|| |description|Descriptive name of the workflow|| |version|Numeric field used to identify the version of the schema. Use incrementing numbers|When starting a workflow execution, if not specified, the definition with highest version is used| -|tasks|An array of task defintions as described below.|| +|tasks|An array of task definitions as described below.|| |outputParameters|JSON template used to generate the output of the workflow|If not specified, the output is defined as the output of the _last_ executed task| |inputParameters|List of input parameters.
Used for documenting the required inputs to workflow|optional| @@ -100,7 +100,7 @@ Below are the mandatory minimum parameters required for each task: |type|Type of task. SIMPLE for tasks executed by remote workers, or one of the system task types|| |inputParameters|JSON template that defines the input given to the task|See "wiring inputs and outputs" for details| -In addition to these paramters, additional parameters speciific to the task type are required as documented [here](/metadata/systask/) +In addition to these parameters, additional parameters specific to the task type are required as documented [here](/metadata/systask/) # Wiring Inputs and Outputs @@ -121,7 +121,7 @@ __${SOURCE.input/output.JSONPath}__ !!! note "JSON Path Support" - Conductor supports [JSONPath](http://goessner.net/articles/JsonPath/) specification and uses Java implementaion from [here](https://github.com/jayway/JsonPath). + Conductor supports [JSONPath](http://goessner.net/articles/JsonPath/) specification and uses Java implementation from [here](https://github.com/jayway/JsonPath). **Example** diff --git a/docs/docs/metadata/kitchensink.md b/docs/docs/metadata/kitchensink.md index 7f2f7e5d95..0102aaeed3 100644 --- a/docs/docs/metadata/kitchensink.md +++ b/docs/docs/metadata/kitchensink.md @@ -9,17 +9,27 @@ An example kitchensink workflow that demonstrates the usage of all the schema co "version": 1, "tasks": [ { - "name": "perf_ task_1", - "taskReferenceName": "perf_ task_1", + "name": "task_1", + "taskReferenceName": "task_1", "inputParameters": { "mod": "${workflow.input.mod}", "oddEven": "${workflow.input.oddEven}" }, "type": "SIMPLE" }, + { + "name": "event_task", + "taskReferenceName": "event_0", + "inputParameters": { + "mod": "${workflow.input.mod}", + "oddEven": "${workflow.input.oddEven}" + }, + "type": "EVENT", + "sink": "conductor" + }, { "name": "dyntask", - "taskReferenceName": "perf_ task_2", + "taskReferenceName": "task_2", "inputParameters": { "taskToExecute": "${workflow.input.task2Name}" }, @@ -30,18 +40,18 @@ An example kitchensink workflow that demonstrates the usage of all the schema co "name": "oddEvenDecision", "taskReferenceName": "oddEvenDecision", "inputParameters": { - "oddEven": "${perf_ task_2.output.oddEven}" + "oddEven": "${task_2.output.oddEven}" }, "type": "DECISION", "caseValueParam": "oddEven", "decisionCases": { "0": [ { - "name": "perf_ task_4", - "taskReferenceName": "perf_ task_4", + "name": "task_4", + "taskReferenceName": "task_4", "inputParameters": { - "mod": "${perf_ task_2.output.mod}", - "oddEven": "${perf_ task_2.output.oddEven}" + "mod": "${task_2.output.mod}", + "oddEven": "${task_2.output.oddEven}" }, "type": "SIMPLE" }, @@ -49,8 +59,8 @@ An example kitchensink workflow that demonstrates the usage of all the schema co "name": "dynamic_fanout", "taskReferenceName": "fanout1", "inputParameters": { - "dynamicTasks": "${perf_ task_4.output.dynamicTasks}", - "input": "${perf_ task_4.output.inputs}" + "dynamicTasks": "${task_4.output.dynamicTasks}", + "input": "${task_4.output.inputs}" }, "type": "FORK_JOIN_DYNAMIC", "dynamicForkTasksParam": "dynamicTasks", @@ -70,16 +80,16 @@ An example kitchensink workflow that demonstrates the usage of all the schema co "forkTasks": [ [ { - "name": "perf_ task_10", - "taskReferenceName": "perf_ task_10", + "name": "task_10", + "taskReferenceName": "task_10", "type": "SIMPLE" }, { "name": "sub_workflow_x", "taskReferenceName": "wf3", "inputParameters": { - "mod": "${perf_ task_1.output.mod}", - "oddEven": "${perf_ 
task_1.output.oddEven}" + "mod": "${task_1.output.mod}", + "oddEven": "${task_1.output.oddEven}" }, "type": "SUB_WORKFLOW", "subWorkflowParam": { @@ -90,16 +100,16 @@ An example kitchensink workflow that demonstrates the usage of all the schema co ], [ { - "name": "perf_ task_11", - "taskReferenceName": "perf_ task_11", + "name": "task_11", + "taskReferenceName": "task_11", "type": "SIMPLE" }, { "name": "sub_workflow_x", "taskReferenceName": "wf4", "inputParameters": { - "mod": "${perf_ task_1.output.mod}", - "oddEven": "${perf_ task_1.output.oddEven}" + "mod": "${task_1.output.mod}", + "oddEven": "${task_1.output.oddEven}" }, "type": "SUB_WORKFLOW", "subWorkflowParam": { @@ -127,18 +137,18 @@ An example kitchensink workflow that demonstrates the usage of all the schema co "taskReferenceName": "get_es_1", "inputParameters": { "http_request": { - "uri": "http://localhost:9200/wfe/workflow/_search?size=10", + "uri": "http://localhost:9200/conductor/_search?size=10", "method": "GET" } }, "type": "HTTP" }, { - "name": "perf_task_30", - "taskReferenceName": "perf_task_30", + "name": "task_30", + "taskReferenceName": "task_30", "inputParameters": { "statuses": "${get_es_1.output..status}", - "fistWorkflowId": "${get_es_1.output.workflowId[0]}" + "workflowIds": "${get_es_1.output..workflowId}" }, "type": "SIMPLE" } @@ -168,9 +178,9 @@ curl -X POST --header 'Content-Type: application/json' --header 'Accept: text/pl } ' ``` -The response is a text string identifyin the workflow instance id. +The response is a text string identifying the workflow instance id. -#### Poll for the fist task: +#### Poll for the first task: ```shell curl http://localhost:8080/api/tasks/poll/task_1 @@ -246,4 +256,4 @@ This will mark the task_1 as completed and schedule ```task_5``` as the next tas Repeat the same process for the subsequently scheduled tasks until the completion. !!! hint "Using Client Libraries" - Conductor provides client libaraies in Java (a Python client is works) to simplify task polling and execution. + Conductor provides client libraries in Java (a Python client is in the works) to simplify task polling and execution. diff --git a/docs/docs/metadata/systask.md b/docs/docs/metadata/systask.md index 83f8791f79..3ddcb6244b 100644 --- a/docs/docs/metadata/systask.md +++ b/docs/docs/metadata/systask.md @@ -21,7 +21,7 @@ If the workflow is started with input parameter user_supplied_task's value as __user_task_2__, Conductor will schedule __user_task_2__ when scheduling this dynamic task. # Decision -A decision task is similar to ```case...switch``` statement in a programming langugage. +A decision task is similar to a ```case...switch``` statement in a programming language. The task takes 3 parameters: ### Parameters:
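For orientation, here is a DECISION task shaped like the one in the kitchensink example earlier in this document; this is only a sketch, with the branch task lists elided, and `defaultCase` (the branch taken when no case matches) assumed as the third parameter:

```json
{
    "name": "oddEvenDecision",
    "taskReferenceName": "oddEvenDecision",
    "inputParameters": {
        "oddEven": "${task_2.output.oddEven}"
    },
    "type": "DECISION",
    "caseValueParam": "oddEven",
    "decisionCases": {
        "0": [],
        "1": []
    },
    "defaultCase": []
}
```

`caseValueParam` names the input whose value selects the branch, and `decisionCases` maps each value to the list of tasks to schedule.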
@@ -128,7 +128,7 @@ A dynamic fork is same as FORK_JOIN task. Except that the list of tasks to be forked is provided at runtime using the task's input. |name|description| |---|---| | dynamicForkTasksParam |Name of the parameter that contains list of workflow task configuration to be executed in parallel| -|dynamicForkTasksInputParamName|Name of the parameter whose value should be a map with key as forked task's referece name and value as input the forked task| +|dynamicForkTasksInputParamName|Name of the parameter whose value should be a map with key as forked task's reference name and value as the input to the forked task| ###Example @@ -177,7 +177,7 @@ Consider **taskA**'s output as: ``` When executed, the dynamic fork task will schedule two parallel task of type "encode_task" with reference names "forkedTask1" and "forkedTask2" and inputs as specified by _ dynamicTasksInputJSON_ -!!!warning "Dyanmic Fork and Join" +!!!warning "Dynamic Fork and Join" **A Join task MUST follow FORK_JOIN_DYNAMIC** Workflow definition MUST include a Join task definition followed by FORK_JOIN_DYNAMIC task. However, given the dynamic nature of the task, no joinOn parameters are required for this Join. The join will wait for ALL the forked branches to complete before completing. @@ -238,7 +238,7 @@ To use a wait task, set the task type as ```WAIT``` ### Parameters None required. -### Exernal Triggers for Wait Task +### External Triggers for Wait Task Task Resource endpoint can be used to update the status of a task to a terminate state. @@ -277,7 +277,7 @@ The task expects an input parameter named ```http_request``` as part of the task |---|---| | uri |URI for the service. Can be a partial when using vipAddress or includes the server address.| |method|HTTP method. One of the GET, PUT, POST, DELETE, OPTIONS, HEAD| -|accept|Accpet header as required by server.| +|accept|Accept header as required by server.| |contentType|Content Type - supported types are text/plain, text/html and, application/json| |headers|A map of additional http headers to be sent along with the request.| |body|Request body| @@ -319,4 +319,41 @@ Task Input using an absolute URL The task is marked as ```FAILED``` if the request cannot be completed or the remote server returns non successful status code. !!!note - HTTP task currently only supports Content-Type as application/json and is able to parse the text as well as JSON response. XML input/output is currently not supported. However, if the response cannot be parsed as JSON or Text, a string representation is stored as a text value. \ No newline at end of file + HTTP task currently only supports Content-Type as application/json and is able to parse the text as well as JSON response. XML input/output is currently not supported. However, if the response cannot be parsed as JSON or Text, a string representation is stored as a text value. + +# Event +The Event task provides the ability to publish an event (message) to either Conductor or an external eventing system like SQS. Event tasks are useful for creating event-based dependencies for workflows and tasks. + +### Parameters +|name|description| +|---|---| +| sink |Qualified name of the event that is produced. e.g. conductor or sqs:sqs_queue_name| + + +### Example + +``` json +{ + "sink": "sqs:example_sqs_queue_name" +} +``` + +When producing an event with Conductor as sink, the event name follows the structure: +```conductor:<workflow_name>:<task_reference_name>``` + +For SQS, use the **name** of the queue and NOT the URI. Conductor looks up the URI based on the name.
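As a concrete sketch, an EVENT task as it could appear inside a workflow definition, mirroring the kitchensink example's event task but pointed at an SQS sink; the task, reference and queue names here are illustrative:

```json
{
    "name": "event_task",
    "taskReferenceName": "event_sqs_0",
    "inputParameters": {
        "mod": "${workflow.input.mod}"
    },
    "type": "EVENT",
    "sink": "sqs:example_sqs_queue_name"
}
```

The `inputParameters` become the payload of the published message (see Event Task Input below).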
+ +!!!warning + When using SQS add the [ContribsModule](https://github.com/Netflix/conductor/blob/master/contribs/src/main/java/com/netflix/conductor/contribs/ContribsModule.java) to the deployment. The module needs to be configured with AWSCredentialsProvider for Conductor to be able to use AWS APIs. + +### Supported Sinks +* Conductor +* SQS + + +### Event Task Input +The input given to the event task is made available to the published message as payload. e.g. if a message is put into an SQS queue (sink is sqs), then the message payload will be the input to the task. + + +### Event Task Output +`event_produced` Name of the event produced. \ No newline at end of file diff --git a/docs/docs/metrics/client.md b/docs/docs/metrics/client.md index 3b035679f8..4872127b93 100644 --- a/docs/docs/metrics/client.md +++ b/docs/docs/metrics/client.md @@ -6,7 +6,7 @@ When using the Java client, the following metrics are published: | ------------- |:-------------| -----| | task_execution_queue_full | Counter to record execution queue has saturated | taskType| | task_poll_error | Client error when polling for a task queue | taskType, includeRetries, status | -| task_execute_error | Excution error | taskType| +| task_execute_error | Execution error | taskType| | task_ack_failed | Task ack failed | taskType | | task_ack_error | Task ack has encountered an exception | taskType | | task_update_error | Task status cannot be updated back to server | taskType | diff --git a/docs/docs/runtime/index.md b/docs/docs/runtime/index.md index f666929c8c..1ffa4269d1 100644 --- a/docs/docs/runtime/index.md +++ b/docs/docs/runtime/index.md @@ -1,13 +1,155 @@ -# Metadata -Workflow blueprints are managed via `/metadata` resource endpoints. +## Task & Workflow Metadata +| Endpoint | Description | Input| +| ------------- |:-------------|---| +| `GET /metadata/taskdefs` | Get all the task definitions| n/a| +| `GET /metadata/taskdefs/{taskType}` | Retrieve task definition| Task Name| +| `POST /metadata/taskdefs` | Register new task definitions| List of [Task Definitions](/metadata/#task-definition)| +| `PUT /metadata/taskdefs` | Update a task definition| A [Task Definition](/metadata/#task-definition)| +| `DELETE /metadata/taskdefs/{taskType}` | Delete a task definition| Task Name| +||| +| `GET /metadata/workflow` | Get all the workflow definitions| n/a| +| `POST /metadata/workflow` | Register new workflow| [Workflow Definition](/metadata/#workflow-definition)| +| `PUT /metadata/workflow` | Register/Update new workflows| List of [Workflow Definition](/metadata/#workflow-definition)| +| `GET /metadata/workflow/{name}?version=` | Get the workflow definitions| workflow name, version (optional)| +||| -# Instances -Each running workflow instance is identified by a unique instance id. This id is used for managing the lifecycle of workflow instance. The following operations are possible: - -1. Start a workflow -2. Terminate -2. Restart -3. Pause -4. Resume -5. Rerun -6. Search for workflows + +## Start A Workflow + +``` +POST /workflow/{name}?version=&correlationId= +{ + //JSON payload for workflow +} +``` +|Parameter|Description| +|---|---| +|version|Optional. If not specified, uses the latest version of the workflow| +|correlationId|User-supplied Id that can be used to retrieve workflows| + +#### Input +JSON Payload to start the workflow. Mandatory. If the workflow does not expect any input, an empty JSON document (`{}`) MUST be passed. + +#### Output +Id of the workflow (GUID)
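For example, starting the kitchensink workflow from the example section against a local server; the payload key is specific to that workflow, and the plain-text response is the new workflow id:

```shell
curl -X POST --header 'Content-Type: application/json' --header 'Accept: text/plain' \
 'http://localhost:8080/api/workflow/kitchensink' -d '
{
    "task2Name": "task_5"
}'
```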
+ +## Retrieve Workflows +|Endpoint|Description| +|---|---| +|`GET /workflow/{workflowId}?includeTasks=true|false`|Get Workflow State by workflow Id. If includeTasks is set, then also includes all the tasks executed and scheduled.| +|`GET /workflow/running/{name}`|Get all the running workflows of a given type| +|`GET /workflow/running/{name}/correlated/{correlationId}?includeClosed=true|false&includeTasks=true|false`|Get all the running workflows filtered by correlation Id. If includeClosed is set, also includes workflows that have completed running.| +|`GET /workflow/search`|Search for workflows. See Below.| +||| + + +## Search for Workflows +Conductor uses Elasticsearch to index workflow executions; the search APIs query this index. + +`GET /workflow/search?start=&size=&sort=&freeText=&query=` + +|Parameter|Description| +|---|---| +|start|Start index used for pagination. Defaults to 0| +|size|Number of results to return| +|sort|Sorting. Format is: `ASC:<field>` or `DESC:<field>` to sort in ascending or descending order by a field| +|freeText|Elasticsearch supported query. e.g. workflowType:"name_of_workflow"| +|query|SQL-like where clause. e.g. workflowType = 'name_of_workflow'. Optional if freeText is provided.| + +### Output +Search result as described below: +```json +{ + "totalHits": 0, + "results": [ + { + "workflowType": "string", + "version": 0, + "workflowId": "string", + "correlationId": "string", + "startTime": "string", + "updateTime": "string", + "endTime": "string", + "status": "RUNNING", + "input": "string", + "output": "string", + "reasonForIncompletion": "string", + "executionTime": 0, + "event": "string" + } + ] +} +``` + +## Manage Workflows +|Endpoint|Description| +|---|---| +|`PUT /workflow/{workflowId}/pause`|Pause. No further tasks will be scheduled until resumed. Currently running tasks are not paused.| +|`PUT /workflow/{workflowId}/resume`|Resume normal operations after a pause.| +|`POST /workflow/{workflowId}/rerun`|See Below.| +|`POST /workflow/{workflowId}/restart`|Restart workflow execution from the start. Current execution history is wiped out.| +|`POST /workflow/{workflowId}/retry`|Retry the last failed task.| +|`PUT /workflow/{workflowId}/skiptask/{taskReferenceName}`|See below.| +|`DELETE /workflow/{workflowId}`|Terminates the running workflow.| +|`DELETE /workflow/{workflowId}/remove`|Deletes the workflow from system. Use with caution.| + +### Rerun +Re-runs a completed workflow from a specific task. + +`POST /workflow/{workflowId}/rerun` + +```json +{ + "reRunFromWorkflowId": "string", + "workflowInput": {}, + "reRunFromTaskId": "string", + "taskInput": {} +} +``` + +###Skip Task + +Skips a task execution (specified as `taskReferenceName` parameter) in a running workflow and continues forward, optionally updating the task's input and output as specified in the payload. +`PUT /workflow/{workflowId}/skiptask/{taskReferenceName}?workflowId=&taskReferenceName=` +```json +{ + "taskInput": {}, + "taskOutput": {} +} +```
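A sketch of the skip call with curl; the workflow id and task reference name are placeholders for values from a real execution:

```shell
curl -X PUT --header 'Content-Type: application/json' \
 'http://localhost:8080/api/workflow/WORKFLOW_ID/skiptask/TASK_REF_NAME' -d '
{
    "taskInput": {},
    "taskOutput": {}
}'
```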
+ +## Manage Tasks +|Endpoint|Description| +|---|---| +|`GET /tasks/{taskId}`|Get task details.| +|`GET /tasks/queue/all`|List the pending task sizes.| +|`GET /tasks/queue/all/verbose`|Same as above, includes the size per shard| +|`GET /tasks/queue/sizes?taskType=&taskType=&taskType`|Return the size of pending tasks for given task types| +||| + +## Polling, Ack and Update Task +These are the critical endpoints workers use to poll for a task, send an ack (after polling), and finally update the result of the task execution. + +|Endpoint|Description| +|---|---| +|`GET /tasks/poll/{taskType}`| Poll for a task| +|`GET /tasks/poll/batch/{taskType}?count=&timeout=`| Poll for a task in a batch specified by `count`. This is a long poll and the connection will wait until `timeout` or if there is at-least 1 item available, whichever comes first.| +|`POST /tasks`| Update the result of task execution. See the schema below.| +|`POST /tasks/{taskId}/ack`| Acknowledges the task received AFTER poll by worker.| + +### Schema for updating Task Result +```json +{ + "workflowInstanceId": "Workflow Instance Id", + "taskId": "ID of the task to be updated", + "reasonForIncompletion" : "If failed, reason for failure", + "callbackAfterSeconds": 0, + "status": "IN_PROGRESS|FAILED|COMPLETED", + "outputData": { + //JSON document representing Task execution output + } + +} +``` +!!!info "Acknowledging tasks after poll" + If the worker fails to ack the task after polling, the task is put back in the queue and made available during a subsequent poll. diff --git a/docs/docs/server/index.md b/docs/docs/server/index.md index 4a9a4e1c2e..6d2d58a906 100644 --- a/docs/docs/server/index.md +++ b/docs/docs/server/index.md @@ -17,7 +17,7 @@ java -jar conductor-server-VERSION-all.jar ``` #### 2. Download pre-built binaries from jcenter or maven central -Use the following coordiates: +Use the following coordinates: |group|artifact|version |---|---|---| @@ -37,7 +37,7 @@ After the docker images are built, run the following command to start the contai docker-compose up ``` -This will create a docker container network that consists of the following images: conductor:server, conductor:ui, [elasticsearch:2.4](https://hub.docker.com/_/elasticsearch/), and [v1r3n/dynomite:latest](https://hub.docker.com/r/v1r3n/dynomite/). +This will create a docker container network that consists of the following images: conductor:server, conductor:ui, [elasticsearch:2.4](https://hub.docker.com/_/elasticsearch/), and dynomite. To view the UI, navigate to [localhost:5000](http://localhost:5000/), to view the Swagger docs, navigate to [localhost:8080](http://localhost:8080/). @@ -53,7 +53,7 @@ log4j.properties file path is optional and allows finer control over the logging ```properties # Database persistence model. Possible values are memory, redis, and dynomite. -# If ommitted, the persistence used is memory +# If omitted, the persistence used is memory # # memory : The data is stored in memory and lost when the server dies. Useful for testing or demo # redis : non-Dynomite based redis instance diff --git a/docs/docs/worker/index.md b/docs/docs/worker/index.md index ccddb1e4b1..1d39d58229 100644 --- a/docs/docs/worker/index.md +++ b/docs/docs/worker/index.md @@ -1,8 +1,8 @@ Conductor tasks executed by remote workers communicates over HTTP endpoints to poll for the task and updates the status of the execution.
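That HTTP conversation, using the endpoints documented in the API section above, looks roughly like this; the task type and server address follow the kitchensink example, and the ids are placeholders filled from the poll response:

```shell
# Poll for a task of type task_1
curl http://localhost:8080/api/tasks/poll/task_1

# Ack the polled task
curl -X POST http://localhost:8080/api/tasks/TASK_ID/ack

# Report the result of the execution
curl -X POST --header 'Content-Type: application/json' http://localhost:8080/api/tasks -d '
{
    "workflowInstanceId": "WORKFLOW_INSTANCE_ID",
    "taskId": "TASK_ID",
    "status": "COMPLETED",
    "outputData": {}
}'
```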
-Conductor provides a framework to poll for tasks, manage the execution thread and update the status of the execution back to the server. The framework provides libraries in Java and Python. Other langugage support can be added by using the HTTP endpoints for task management. +Conductor provides a framework to poll for tasks, manage the execution thread and update the status of the execution back to the server. The framework provides libraries in Java and Python. Other language support can be added by using the HTTP endpoints for task management. -### Java +## Java 1. Implement [Worker](https://github.com/Netflix/conductor/blob/dev/client/src/main/java/com/netflix/conductor/client/worker/Worker.java) interface to implement the task. 2. Use [WorkflowTaskCoordinator](https://github.com/Netflix/conductor/blob/dev/client/src/main/java/com/netflix/conductor/client/task/WorkflowTaskCoordinator.java) to register the worker(s) and initialize the polling loop. @@ -10,7 +10,26 @@ Conductor provides a framework to poll for tasks, manage the execution thread an * [Sample Worker Implementation](https://github.com/Netflix/conductor/blob/dev/client/src/test/java/com/netflix/conductor/client/sample/SampleWorker.java) * [Example](https://github.com/Netflix/conductor/blob/dev/client/src/test/java/com/netflix/conductor/client/sample/Main.java) -### Python +###WorkflowTaskCoordinator +Manages the Task workers thread pool and server communication (poll, task update and ack). + +###Worker +|Property|Description| +|---|---| +|paused|boolean. If set to true, the worker stops polling.| +|pollCount|No. of tasks to poll for. Used for batched polling. Each task is executed in a separate thread.| +|longPollTimeout|Time in milliseconds for long polling to Conductor server for tasks| +|| + +These properties can be set either by the Worker implementation or by setting the following system properties in the JVM: + +||| +|---|---| +|`conductor.worker.<property>`|Applies to ALL the workers in the JVM| +|`conductor.worker.<taskDefName>.<property>`|Applies to the specified worker.
Overrides the global property.| + + +## Python [https://github.com/Netflix/conductor/tree/dev/client/python](https://github.com/Netflix/conductor/tree/dev/client/python) Follow the example as documented in the readme or take a look at [kitchensink_workers.py](https://github.com/Netflix/conductor/blob/dev/client/python/kitchensink_workers.py) diff --git a/docs/kitchensink.json b/docs/kitchensink.json index 7f0d2c7059..5e1e50de85 100644 --- a/docs/kitchensink.json +++ b/docs/kitchensink.json @@ -8,9 +8,8 @@ "taskReferenceName": "get_es_0", "inputParameters": { "http_request": { - "uri": "http://localhost:9200/wfe_perf/workflow/_search?q=status:COMPLETED&size=10", - "method": "GET", - "vipAddress": "es_cpe_wfe.us-east-1.cloud.netflix.com" + "uri": "http://localhost:9200/conductor/workflow/_search?q=status:COMPLETED&size=10", + "method": "GET" } }, "type": "HTTP" @@ -134,7 +133,7 @@ "taskReferenceName": "get_es_1", "inputParameters": { "http_request": { - "uri": "http://es_cpe_wfe.us-east-1.dyntest.netflix.net:7104/wfe_perf/workflow/_search?q=status:COMPLETED&size=10", + "uri": "http://localhost:9200/conductor/workflow/_search?q=status:COMPLETED&size=10", "method": "GET" } }, diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 9f8f023e92..8ea6f55194 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -9,14 +9,16 @@ pages: - 'Basic Concepts': intro/concepts.md - 'Metadata Definitions': metadata/index.md - 'System Tasks': metadata/systask.md +- 'Event Handlers': events/index.md - 'Kitchensink Example': metadata/kitchensink.md - 'Conductor Server': server/index.md - 'Conductor Task Workers': worker/index.md - 'Extending Conductor': extend/index.md -- 'Workflow Management': runtime/index.md +- 'APIs': runtime/index.md - 'Workflow Metrics': - 'Server Metrics': metrics/index.md - 'Worker Metrics': metrics/client.md +- 'FAQ': faq.md - License: 'license.md' theme: readthedocs extra_css: diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/AdminResource.java b/jersey/src/main/java/com/netflix/conductor/server/resources/AdminResource.java index 17da3369f4..dcf0470e43 100644 --- a/jersey/src/main/java/com/netflix/conductor/server/resources/AdminResource.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/AdminResource.java @@ -18,8 +18,10 @@ */ package com.netflix.conductor.server.resources; +import java.io.InputStream; import java.util.List; import java.util.Map; +import java.util.Properties; import javax.inject.Inject; import javax.inject.Singleton; @@ -33,6 +35,9 @@ import javax.ws.rs.QueryParam; import javax.ws.rs.core.MediaType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.execution.WorkflowExecutor; @@ -53,17 +58,36 @@ @Singleton public class AdminResource { + private static Logger logger = LoggerFactory.getLogger(AdminResource.class); + private Configuration config; private ExecutionService service; private QueueDAO queue; + private String version; + + private String buildDate; + @Inject public AdminResource(Configuration config, ExecutionService service, QueueDAO queue) { this.config = config; this.service = service; this.queue = queue; + this.version = "UNKNOWN"; + this.buildDate = "UNKNOWN"; + + try { + + InputStream propertiesIs = this.getClass().getClassLoader().getResourceAsStream("META-INF/conductor-core.properties"); + Properties prop = new Properties(); + prop.load(propertiesIs); + 
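// Build metadata read from conductor-core.properties on the classpath; version and buildDate remain UNKNOWN if the file or keys are absent +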
this.version = prop.getProperty("Implementation-Version"); + this.buildDate = prop.getProperty("Build-Date"); + }catch(Exception e) { + logger.error(e.getMessage(), e); + } } @ApiOperation(value = "Get all the configuration parameters") @@ -72,7 +96,10 @@ public AdminResource(Configuration config, ExecutionService service, QueueDAO qu @Produces(MediaType.APPLICATION_JSON) @Path("/config") public Map getAllConfig() { - return config.getAll(); + Map map = config.getAll(); + map.put("version", version); + map.put("buildDate", buildDate); + return map; } diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/EventResource.java b/jersey/src/main/java/com/netflix/conductor/server/resources/EventResource.java index 15bf0e6559..3bb49cf8ee 100644 --- a/jersey/src/main/java/com/netflix/conductor/server/resources/EventResource.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/EventResource.java @@ -35,11 +35,9 @@ import javax.ws.rs.QueryParam; import javax.ws.rs.core.MediaType; -import com.netflix.conductor.common.metadata.events.EventExecution; import com.netflix.conductor.common.metadata.events.EventHandler; import com.netflix.conductor.core.events.EventProcessor; import com.netflix.conductor.core.events.EventQueues; -import com.netflix.conductor.service.ExecutionService; import com.netflix.conductor.service.MetadataService; import io.swagger.annotations.Api; @@ -61,13 +59,10 @@ public class EventResource { private EventProcessor ep; - private ExecutionService es; - @Inject - public EventResource(MetadataService service, EventProcessor ep, ExecutionService es) { + public EventResource(MetadataService service, EventProcessor ep) { this.service = service; this.ep = ep; - this.es = es; } @POST @@ -117,13 +112,4 @@ public List getEventQueueProviders() { return EventQueues.providers(); } - @GET - @Path("/executions/{eventHandlerName}/{eventName}/{messageId}") - @ApiOperation("Get Event executions") - public List getEventExecutions( - @PathParam("eventHandlerName") String eventHandlerName, @PathParam("eventName") String eventName, @PathParam("messageId") String messageId, - @QueryParam("max") @DefaultValue("100") int max) { - return es.getEventExecutions(eventHandlerName, eventName, messageId, max); - } - } diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/TaskResource.java b/jersey/src/main/java/com/netflix/conductor/server/resources/TaskResource.java index be3374b16f..11ebea96e0 100644 --- a/jersey/src/main/java/com/netflix/conductor/server/resources/TaskResource.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/TaskResource.java @@ -141,10 +141,11 @@ public void remvoeTaskFromQueue(@PathParam("taskType") String taskType, @PathPar taskService.removeTaskfromQueue(taskType, taskId); } - @POST + @GET @Path("/queue/sizes") @ApiOperation("Get Task type queue sizes") - public Map size(List taskTypes) throws Exception { + @Consumes({ MediaType.WILDCARD }) + public Map size(@QueryParam("taskType") List taskTypes) throws Exception { return taskService.getTaskQueueSizes(taskTypes); } diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/WorkflowResource.java b/jersey/src/main/java/com/netflix/conductor/server/resources/WorkflowResource.java index 7753be64ec..18a77ee50f 100644 --- a/jersey/src/main/java/com/netflix/conductor/server/resources/WorkflowResource.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/WorkflowResource.java @@ -93,7 +93,7 @@ public String startWorkflow ( if(def == null){ 
throw new ApplicationException(Code.NOT_FOUND, "No such workflow found by name=" + name + ", version=" + version); } - return executor.startWorkflow(def.getName(), def.getVersion(), correlationId, input); + return executor.startWorkflow(def.getName(), def.getVersion(), correlationId, input, null); } @GET diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAO.java b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAO.java index 6cf24ac12b..94623ff43e 100644 --- a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAO.java +++ b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAO.java @@ -238,7 +238,7 @@ public String updateWorkflow(Workflow workflow) { workflow.setUpdateTime(System.currentTimeMillis()); return insertOrUpdateWorkflow(workflow, true); } - + @Override public void removeWorkflow(String workflowId) { diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dao/index/ElasticSearchDAO.java b/redis-persistence/src/main/java/com/netflix/conductor/dao/index/ElasticSearchDAO.java index ea2830d234..6a2ac30ac2 100644 --- a/redis-persistence/src/main/java/com/netflix/conductor/dao/index/ElasticSearchDAO.java +++ b/redis-persistence/src/main/java/com/netflix/conductor/dao/index/ElasticSearchDAO.java @@ -97,6 +97,8 @@ public class ElasticSearchDAO implements IndexDAO { private String indexName; private String logIndexName; + + private String logIndexPrefix; private ObjectMapper om; @@ -129,8 +131,8 @@ public ElasticSearchDAO(Client client, Configuration config, ObjectMapper om) { } private void updateIndexName(Configuration config) { - String prefix = config.getProperty("workflow.elasticsearch.tasklog.index.name", "task_log"); - this.logIndexName = prefix + "_" + sdf.format(new Date()); + this.logIndexPrefix = config.getProperty("workflow.elasticsearch.tasklog.index.name", "task_log"); + this.logIndexName = this.logIndexPrefix + "_" + sdf.format(new Date()); try { client.admin().indices().prepareGetIndex().addIndices(logIndexName).execute().actionGet(); @@ -330,7 +332,7 @@ public SearchResult searchWorkflows(String query, String freeText, int s throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e); } } - + @Override public void remove(String workflowId) { try { @@ -393,4 +395,6 @@ private SearchResult search(String structuredQuery, int start, int size, long count = response.getHits().getTotalHits(); return new SearchResult(count, result); } + + } diff --git a/server/.gitignore b/server/.gitignore new file mode 100644 index 0000000000..ae3c172604 --- /dev/null +++ b/server/.gitignore @@ -0,0 +1 @@ +/bin/ diff --git a/server/build.gradle b/server/build.gradle index 738906541a..c377b3b653 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -1,3 +1,9 @@ +buildscript { + dependencies { + classpath "org.akhikhl.gretty:gretty:1.2.4" + } +} + plugins { id 'com.github.johnrengelman.shadow' version '1.2.3' } @@ -8,6 +14,9 @@ configurations.all { } } +apply plugin: 'war' +apply plugin: "org.akhikhl.gretty" + dependencies { //Conductor @@ -23,7 +32,7 @@ dependencies { //Guice compile 'com.sun.jersey.contribs:jersey-guice:1.19.+' compile 'com.google.inject:guice:4.+' - compile 'com.google.inject.extensions:guice-servlet:4.+' + compile 'com.google.inject.extensions:guice-servlet:4.1.+' //Swagger compile 'io.swagger:swagger-jersey-jaxrs:1.5.0' @@ -48,6 +57,20 @@ publishing { } } +gretty { + contextPath = '/' + servletContainer = 
'tomcat8' + scanDirs = ['**/src/main/resources/**'] + scanDependencies = true + port = 8080 + // More properties can be found here: + // http://akhikhl.github.io/gretty-doc/Gretty-configuration.html +} +configurations.grettyRunnerTomcat8 { + exclude group: 'org.slf4j', module: 'log4j-over-slf4j' +} + + build.dependsOn('shadowJar') task server(type: JavaExec) { diff --git a/server/src/main/java/com/netflix/conductor/server/ConductorServer.java b/server/src/main/java/com/netflix/conductor/server/ConductorServer.java index d63f250eff..d922c58fd7 100644 --- a/server/src/main/java/com/netflix/conductor/server/ConductorServer.java +++ b/server/src/main/java/com/netflix/conductor/server/ConductorServer.java @@ -51,7 +51,6 @@ import com.netflix.dyno.jedis.DynoJedisClient; import com.sun.jersey.api.client.Client; -import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisCommands; /** @@ -124,14 +123,7 @@ public Collection getHosts() { JedisCommands jedis = null; switch(db) { - case redis: - - String host = dynoHosts.get(0).getHostName(); - int port = dynoHosts.get(0).getPort(); - jedis = new Jedis(host, port); - logger.info("Starting conductor server using standalone redis on " + host + ":" + port); - break; - + case redis: case dynomite: ConnectionPoolConfigurationImpl cp = new ConnectionPoolConfigurationImpl(dynoClusterName).withTokenSupplier(new TokenMapSupplier() { @@ -161,7 +153,6 @@ public HostToken getTokenForHost(Host host, Set activeHosts) { break; case memory: - jedis = new JedisMock(); try { EmbeddedElasticSearch.start(); @@ -181,6 +172,10 @@ public HostToken getTokenForHost(Host host, Set activeHosts) { this.sm = new ServerModule(jedis, hs, cc); } + public ServerModule getGuiceModule() { + return sm; + } + public synchronized void start(int port, boolean join) throws Exception { if(server != null) { diff --git a/server/src/main/java/com/netflix/conductor/server/ServerModule.java b/server/src/main/java/com/netflix/conductor/server/ServerModule.java index cb54d3b0e1..45d0879f69 100644 --- a/server/src/main/java/com/netflix/conductor/server/ServerModule.java +++ b/server/src/main/java/com/netflix/conductor/server/ServerModule.java @@ -25,7 +25,6 @@ import com.google.inject.AbstractModule; import com.google.inject.Provides; -import com.netflix.conductor.contribs.ContribsModule; import com.netflix.conductor.contribs.http.HttpTask; import com.netflix.conductor.contribs.http.RestClientManager; import com.netflix.conductor.core.config.Configuration; diff --git a/server/src/main/java/com/netflix/conductor/server/ServletContextListner.java b/server/src/main/java/com/netflix/conductor/server/ServletContextListner.java new file mode 100644 index 0000000000..6cd437704d --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/server/ServletContextListner.java @@ -0,0 +1,96 @@ +/** + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/** + * + */ +package com.netflix.conductor.server; + +import java.io.FileInputStream; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; + +import org.apache.log4j.PropertyConfigurator; +import org.eclipse.jetty.servlet.DefaultServlet; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.servlet.GuiceServletContextListener; +import com.google.inject.servlet.ServletModule; + +/** + * @author Viren + * + */ +public class ServletContextListner extends GuiceServletContextListener { + + @Override + protected Injector getInjector() { + + loadProperties(); + + ConductorConfig config = new ConductorConfig(); + ConductorServer server = new ConductorServer(config); + + return Guice.createInjector(server.getGuiceModule(), getSwagger()); + } + + private ServletModule getSwagger() { + + String resourceBasePath = ServletContextListner.class.getResource("/swagger-ui").toExternalForm(); + DefaultServlet ds = new DefaultServlet(); + + ServletModule sm = new ServletModule() { + @Override + protected void configureServlets() { + Map params = new HashMap<>(); + params.put("resourceBase", resourceBasePath); + params.put("redirectWelcome", "true"); + serve("/*").with(ds, params); + } + }; + + return sm; + + } + + private void loadProperties() { + try { + + String key = "conductor_properties"; + String propertyFile = Optional.ofNullable(System.getProperty(key)).orElse(System.getenv(key)); + if(propertyFile != null) { + System.out.println("Using " + propertyFile); + FileInputStream propFile = new FileInputStream(propertyFile); + Properties props = new Properties(System.getProperties()); + props.load(propFile); + System.setProperties(props); + } + + key = "log4j_properties"; + String log4jConfig = Optional.ofNullable(System.getProperty(key)).orElse(System.getenv(key)); + if(log4jConfig != null) { + PropertyConfigurator.configure(new FileInputStream(log4jConfig)); + } + + } catch (Exception e) { + System.err.println("Error loading properties " + e.getMessage()); + e.printStackTrace(); + } + } + +} diff --git a/server/src/main/resources/kitchensink.json b/server/src/main/resources/kitchensink.json index cc7f85331a..86b902d056 100644 --- a/server/src/main/resources/kitchensink.json +++ b/server/src/main/resources/kitchensink.json @@ -12,6 +12,16 @@ }, "type": "SIMPLE" }, + { + "name": "event_task", + "taskReferenceName": "event_0", + "inputParameters": { + "mod": "${workflow.input.mod}", + "oddEven": "${workflow.input.oddEven}" + }, + "type": "EVENT", + "sink": "conductor" + }, { "name": "dyntask", "taskReferenceName": "task_2", diff --git a/server/src/main/resources/server.properties b/server/src/main/resources/server.properties index e3c6c2406d..7d01dbd42b 100644 --- a/server/src/main/resources/server.properties +++ b/server/src/main/resources/server.properties @@ -31,7 +31,10 @@ queues.dynomite.threads=10 queues.dynomite.nonQuorum.port=22122 #Transport address to elasticsearch -workflow.elasticsearch.url=es_cpe_wfe.us-east-1.dynprod.netflix.net:7102 +workflow.elasticsearch.url=localhost:9003 #Name of the elasticsearch cluster -workflow.elasticsearch.index.name=wfe \ No newline at end of file +workflow.elasticsearch.index.name=conductor + +# For a single node dynomite or redis server, make sure the value below is set to same as rack specified in the "workflow.dynomite.cluster.hosts" property. 
EC2_AVAILABILTY_ZONE=us-east-1c diff --git a/server/src/main/webapp/WEB-INF/web.xml b/server/src/main/webapp/WEB-INF/web.xml new file mode 100644 index 0000000000..b898fea90c --- /dev/null +++ b/server/src/main/webapp/WEB-INF/web.xml @@ -0,0 +1,17 @@ +<?xml version="1.0" encoding="UTF-8"?> +<web-app> + +<filter> +<filter-name>guiceFilter</filter-name> +<filter-class>com.google.inject.servlet.GuiceFilter</filter-class> +</filter> + +<filter-mapping> +<filter-name>guiceFilter</filter-name> +<url-pattern>/*</url-pattern> +</filter-mapping> + +<listener> +<listener-class>com.netflix.conductor.server.ServletContextListner</listener-class> +</listener> +</web-app> diff --git a/server/src/main/webapp/favicon.ico b/server/src/main/webapp/favicon.ico new file mode 100644 index 0000000000..b083672203 Binary files /dev/null and b/server/src/main/webapp/favicon.ico differ diff --git a/test-harness/.gitignore b/test-harness/.gitignore new file mode 100644 index 0000000000..ae3c172604 --- /dev/null +++ b/test-harness/.gitignore @@ -0,0 +1 @@ +/bin/ diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowServiceTest.java b/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowServiceTest.java index 7f7c5ebdd5..d76a4420c3 100644 --- a/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowServiceTest.java +++ b/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowServiceTest.java @@ -52,6 +52,7 @@ import com.netflix.conductor.common.metadata.tasks.TaskDef; import com.netflix.conductor.common.metadata.tasks.TaskDef.RetryLogic; import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy; +import com.netflix.conductor.common.metadata.tasks.TaskResult; import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList; import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest; import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams; @@ -62,7 +63,6 @@ import com.netflix.conductor.common.run.Workflow.WorkflowStatus; import com.netflix.conductor.core.WorkflowContext; import com.netflix.conductor.core.execution.ApplicationException; -import com.netflix.conductor.core.execution.DeciderService; import com.netflix.conductor.core.execution.SystemTaskType; import com.netflix.conductor.core.execution.WorkflowExecutor; import com.netflix.conductor.core.execution.WorkflowSweeper; @@ -96,8 +96,6 @@ public class WorkflowServiceTest { @Inject private MetadataService ms; - @Inject - private DeciderService ds; @Inject private WorkflowSweeper sweeper; @@ -382,12 +380,12 @@ public void testForkJoin() throws Exception { wf = ess.getExecutionStatus(wfid, true); assertNotNull(wf); assertEquals("Found " + wf.getTasks(), WorkflowStatus.RUNNING, wf.getStatus()); - if(!wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t3"))){ - ds.decide(wfid, provider); + if (!wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t3"))) { + provider.decide(wfid); wf = ess.getExecutionStatus(wfid, true); assertNotNull(wf); }else { - ds.decide(wfid, provider); + provider.decide(wfid); } assertTrue("Found " + wf.getTasks().stream().map(t -> t.getTaskType()).collect(Collectors.toList()), wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t3"))); @@ -397,8 +395,8 @@ public void testForkJoin() throws Exception { assertTrue("Found " + wf.getTasks().stream().map(t -> t.getReferenceTaskName() + "."
+ t.getStatus()).collect(Collectors.toList()), wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t4"))); assertEquals("Found " + wf.getTasks().stream().map(t -> t.getTaskType()).collect(Collectors.toList()), 6, wf.getTasks().size()); - ds.decide(wfid, provider); - ds.decide(wfid, provider); + provider.decide(wfid); + provider.decide(wfid); wf = ess.getExecutionStatus(wfid, true); assertNotNull(wf); @@ -696,7 +694,7 @@ public void testDynamicForkJoin() throws Exception { es = ess.getExecutionStatus(wfid, true); assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); + assertEquals(es.getReasonForIncompletion(), WorkflowStatus.RUNNING, es.getStatus()); assertEquals(2, es.getTasks().stream().filter(t -> t.getTaskType().equals("junit_task_2")).count()); assertTrue(es.getTasks().stream().filter(t -> t.getTaskType().equals("junit_task_2")).allMatch(t -> t.getDynamicWorkflowTask() != null)); @@ -1011,7 +1009,6 @@ private void createConditionalWF() throws Exception { finalTask.getDecisionCases().put("notify", Arrays.asList(notifyTask)); def2.getTasks().add(finalTask ); - System.out.println(new ObjectMapper().writeValueAsString(def2)); ms.updateWorkflowDef(def2); } @@ -1145,7 +1142,7 @@ public void testLongRunning() throws Exception { String inputParam1 = "p1 value"; input.put("param1", inputParam1); input.put("param2", "p2 value"); - String wfid = provider.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId , input); + String wfid = provider.startWorkflow(LONG_RUNNING, 1, correlationId , input); System.out.println("testLongRunning.wfid=" + wfid); assertNotNull(wfid); @@ -1414,7 +1411,7 @@ private void validate(String wfid, String[] sequence, String[] executedTasks, in List workflowTasks = workflow.getTasks(); assertEquals(workflowTasks.toString(), executedTasks.length, workflowTasks.size()); for(int k = 0; k < executedTasks.length; k++){ - assertEquals(workflowTasks.toString(), executedTasks[k], workflowTasks.get(k).getTaskType()); + assertEquals("Tasks: " + workflowTasks.toString() + "\n", executedTasks[k], workflowTasks.get(k).getTaskType()); } assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); @@ -1640,12 +1637,17 @@ public void testDeciderUpdate() throws Exception { Workflow workflow = provider.getWorkflow(wfid, false); long updated1 = workflow.getUpdateTime(); Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); - ds.decide(wfid, provider); + provider.decide(wfid); workflow = provider.getWorkflow(wfid, false); long updated2 = workflow.getUpdateTime(); - assertTrue(updated2 > updated1); + assertEquals(updated1, updated2); + Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); provider.terminateWorkflow(wfid, "done"); + workflow = provider.getWorkflow(wfid, false); + updated2 = workflow.getUpdateTime(); + assertTrue("updated1[" + updated1 + "] >? updated2[" + updated2 + "]", updated2 > updated1); + } @Test @@ -1765,7 +1767,7 @@ public void testDeciderMix() throws Exception { List> futures = new LinkedList<>(); for(int i = 0; i < 10; i++){ futures.add(executors.submit(()->{ - ds.decide(wfid, provider); + provider.decide(wfid); return null; })); } @@ -1783,7 +1785,7 @@ public void testDeciderMix() throws Exception { // decideNow should be idempotent if re-run on the same state! 
- ds.decide(wfid, provider); + provider.decide(wfid); es = ess.getExecutionStatus(wfid, true); assertNotNull(es); assertEquals(WorkflowStatus.RUNNING, es.getStatus()); @@ -1828,7 +1830,7 @@ public void testDeciderMix() throws Exception { for(int i = 0; i < 10; i++){ futures.add(executors.submit(()->{ long s = System.currentTimeMillis(); - ds.decide(wfid, provider); + provider.decide(wfid); System.out.println("Took " + (System.currentTimeMillis()-s) + " ms to run decider"); return null; })); @@ -2113,7 +2115,7 @@ public void testTimeout() throws Exception { assertTrue(ess.ackTaskRecieved(task.getTaskId(), "task1.junit.worker")); Uninterruptibles.sleepUninterruptibly(3, TimeUnit.SECONDS); - ds.decide(wfid, provider); + provider.decide(wfid); es = ess.getExecutionStatus(wfid, true); assertNotNull(es); @@ -2437,7 +2439,7 @@ public void testPauseResume() throws Exception { assertNull("Found: " + task, task); // Even if decide is run again the next task will not be scheduled as the workflow is still paused-- - ds.decide(wfid, provider); + provider.decide(wfid); task = ess.poll("junit_task_2", "task2.junit.worker"); assertTrue(task == null); @@ -2649,6 +2651,34 @@ public void testSubWorkflowFailureInverse() throws Exception { } + @Test + public void testWait() throws Exception { + + + WorkflowDef def = new WorkflowDef(); + def.setName("test_wait"); + def.setSchemaVersion(2); + WorkflowTask wait = new WorkflowTask(); + wait.setWorkflowTaskType(Type.WAIT); + wait.setName("wait"); + wait.setTaskReferenceName("wait0"); + def.getTasks().add(wait); + ms.registerWorkflowDef(def); + + String id = provider.startWorkflow(def.getName(), def.getVersion(), "", new HashMap<>()); + Workflow workflow = provider.getWorkflow(id, true); + assertNotNull(workflow); + assertEquals(1, workflow.getTasks().size()); + assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); + Task waitTask = workflow.getTasks().get(0); + assertEquals(WorkflowTask.Type.WAIT.name(), waitTask.getTaskType()); + waitTask.setStatus(Status.COMPLETED); + provider.updateTask(new TaskResult(waitTask)); + + workflow = provider.getWorkflow(id, true); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + } + private void createSubWorkflow() throws Exception { diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockIndexDAO.java b/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockIndexDAO.java index a1922050c3..b0aa0794de 100644 --- a/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockIndexDAO.java +++ b/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockIndexDAO.java @@ -70,4 +70,5 @@ public void add(EventExecution ee) { public void addMessage(String queue, Message msg) { } + } diff --git a/ui/.gitignore b/ui/.gitignore new file mode 100644 index 0000000000..ae3c172604 --- /dev/null +++ b/ui/.gitignore @@ -0,0 +1 @@ +/bin/ diff --git a/ui/src/actions/WorkflowActions.js b/ui/src/actions/WorkflowActions.js index 7395fce132..aa914a7f32 100644 --- a/ui/src/actions/WorkflowActions.js +++ b/ui/src/actions/WorkflowActions.js @@ -1,6 +1,6 @@ import http from '../core/HttpClient'; -export function searchWorkflows(query, search, hours, fullstr) { +export function searchWorkflows(query, search, hours, fullstr, start) { return function (dispatch) { dispatch({ @@ -11,7 +11,7 @@ export function searchWorkflows(query, search, hours, fullstr) { if(fullstr && search != null && search.length > 0) { search = '"' + search + '"'; } - return http.get('/api/wfe/' + status + '?q=' + query + '&h=' 
+ hours + '&freeText=' + search).then((data) => { + return http.get('/api/wfe/' + status + '?q=' + query + '&h=' + hours + '&freeText=' + search + '&start=' + start).then((data) => { dispatch({ type: 'RECEIVED_WORKFLOWS', data diff --git a/ui/src/api/sys.js b/ui/src/api/sys.js index cb81771796..2fbd4d2eaf 100644 --- a/ui/src/api/sys.js +++ b/ui/src/api/sys.js @@ -11,10 +11,11 @@ router.get('/', async (req, res, next) => { try { const result = { server: wfServer, - version: '1.0', env: process.env }; - + const config = await http.get(wfServer + 'admin/config'); + result.version = config.version; + result.buildDate = config.buildDate; res.status(200).send({sys: result}); } catch (err) { next(err); diff --git a/ui/src/api/wfe.js b/ui/src/api/wfe.js index 0259a4c673..1c2f687365 100644 --- a/ui/src/api/wfe.js +++ b/ui/src/api/wfe.js @@ -25,9 +25,13 @@ router.get('/', async (req, res, next) => { if(h != '-1'){ freeText.push('startTime:[now-' + h + 'h TO now]'); } + let start = 0; + if(!isNaN(req.query.start)){ + start = req.query.start; + } let query = req.query.q; - const url = baseURL2 + 'search?start=0&size=100&sort=startTime:DESC&freeText=' + freeText.join(' AND ') + '&query=' + query; + const url = baseURL2 + 'search?size=100&sort=startTime:DESC&freeText=' + freeText.join(' AND ') + '&start=' + start + '&query=' + query; const result = await http.get(url); const hits = result.results; res.status(200).send({result: {hits:hits, totalHits: result.totalHits}}); diff --git a/ui/src/components/common/Footer.js b/ui/src/components/common/Footer.js index 0d6c8b5688..5a4457a135 100644 --- a/ui/src/components/common/Footer.js +++ b/ui/src/components/common/Footer.js @@ -21,8 +21,11 @@ class Footer extends Component { render() { return (
-
- Workflow Server: {this.state.sys.server} +
+ Server: {this.state.sys.server} + + Version: {this.state.sys.version} | Build Date: {this.state.sys.buildDate} +
); diff --git a/ui/src/components/common/LeftMenu.js b/ui/src/components/common/LeftMenu.js index 7c7981083f..a8f9e82fa1 100644 --- a/ui/src/components/common/LeftMenu.js +++ b/ui/src/components/common/LeftMenu.js @@ -125,9 +125,6 @@ const LeftMenu = React.createClass({
{menuItems}
-
- {this.state.version} -
); } diff --git a/ui/src/components/event/EventList.js b/ui/src/components/event/EventList.js index 1c0085e2de..d7eb1cc6c8 100644 --- a/ui/src/components/event/EventList.js +++ b/ui/src/components/event/EventList.js @@ -30,7 +30,6 @@ const Events = React.createClass({ return cell?'Yes':'No'; }; function helpName() { - // return (
diff --git a/ui/src/components/workflow/executions/WorkflowDetails.js b/ui/src/components/workflow/executions/WorkflowDetails.js index 3b338d70da..f3249a683c 100644 --- a/ui/src/components/workflow/executions/WorkflowDetails.js +++ b/ui/src/components/workflow/executions/WorkflowDetails.js @@ -52,7 +52,7 @@ class WorkflowDetails extends Component { } let tasks = wf['tasks']; tasks = tasks.sort(function(a,b){ - return a.endTime - b.endTime; + return a.seq - b.seq; }); function formatDate(dt){ if(dt == null || dt == ''){ @@ -202,7 +202,8 @@ class WorkflowDetails extends Component {
-
{JSON.stringify(wf, null, 3)}
+ +
{JSON.stringify(wf, null, 3)}
diff --git a/ui/src/components/workflow/executions/WorkflowList.js b/ui/src/components/workflow/executions/WorkflowList.js index 7a0374beb5..e1324f47a7 100644 --- a/ui/src/components/workflow/executions/WorkflowList.js +++ b/ui/src/components/workflow/executions/WorkflowList.js @@ -27,6 +27,11 @@ const Workflow = React.createClass({ if(search == null || search == 'undefined' || search == '') { search = ''; } + let st = this.props.location.query.start; + let start = 0; + if(!isNaN(st)) { + start = parseInt(st); + } return { search: search, @@ -35,7 +40,8 @@ const Workflow = React.createClass({ h: this.props.location.query.h, workflows: [], update: true, - fullstr: true + fullstr: true, + start: start } }, componentWillMount(){ @@ -56,6 +62,10 @@ const Workflow = React.createClass({ if(isNaN(h)) { h = ''; } + let start = nextProps.location.query.start; + if(isNaN(start)) { + start = 0; + } let status = nextProps.location.query.status; if(status != null && status != '') { status = status.split(','); @@ -66,6 +76,7 @@ const Workflow = React.createClass({ let update = true; update = this.state.search != search; update = update || (this.state.h != h); + update = update || (this.state.start != start); update = update || (this.state.status.join(',') != status.join(',')); this.setState({ @@ -73,7 +84,8 @@ const Workflow = React.createClass({ h : h, update : update, status : status, - workflows : workflowDefs + workflows : workflowDefs, + start : start }); this.refreshResults(); @@ -94,7 +106,8 @@ const Workflow = React.createClass({ let h = this.state.h; let workflowTypes = this.state.workflowTypes; let status = this.state.status; - this.props.history.pushState(null, "/workflow?q=" + q + "&h=" + h + "&workflowTypes=" + workflowTypes + "&status=" + status); + let start = this.state.start; + this.props.history.pushState(null, "/workflow?q=" + q + "&h=" + h + "&workflowTypes=" + workflowTypes + "&status=" + status + "&start=" + start); }, doDispatch() { @@ -111,7 +124,7 @@ const Workflow = React.createClass({ if(this.state.status.length > 0) { query.push('status IN (' + this.state.status.join(',') + ') '); } - this.props.dispatch(searchWorkflows(query.join(' AND '), search, this.state.h, this.state.fullstr)); + this.props.dispatch(searchWorkflows(query.join(' AND '), search, this.state.h, this.state.fullstr, this.state.start)); }, workflowTypeChange(workflowTypes) { this.state.update = true; @@ -123,6 +136,19 @@ const Workflow = React.createClass({ this.state.status = status; this.refreshResults(); }, + nextPage() { + this.state.start = 100 + parseInt(this.state.start); + this.state.update = true; + this.refreshResults(); + }, + prevPage() { + this.state.start = parseInt(this.state.start) - 100; + if(this.state.start < 0) { + this.state.start = 0; + } + this.state.update = true; + this.refreshResults(); + }, searchChange(e){ let val = e.target.value; this.setState({ search: val }); @@ -156,6 +182,11 @@ const Workflow = React.createClass({ totalHits = this.props.data.totalHits; found = wfs.length; } + let start = parseInt(this.state.start); + let max = start + 100; + if(found < 100) { + max = start + found; + } const workflowNames = this.state.workflows?this.state.workflows:[]; const statusList = ['RUNNING','COMPLETED','FAILED','TIMED_OUT','TERMINATED','PAUSED']; function linkMaker(cell, row) { @@ -230,7 +261,11 @@ const Workflow = React.createClass({ - Displaying {found} of {totalHits} Workflows Found. 
+ Total Workflows Found: {totalHits}, Displaying {this.state.start} to {max} + + {parseInt(this.state.start) >= 100? Previous Page:''} + {parseInt(this.state.start) + 100 <= totalHits?  Next Page :''} + Workflow Workflow ID diff --git a/ui/src/styles/main.css b/ui/src/styles/main.css index 774b44c183..b1213e3c53 100644 --- a/ui/src/styles/main.css +++ b/ui/src/styles/main.css @@ -102,7 +102,9 @@ body { } .Footer-text { - color: rgba(255, 255, 255, .7); + color: rgba(255, 255, 255, 1); + font-size: 0.9em; + letter-spacing: 1px; } .Footer-text--muted {