|
| 1 | +/* |
| 2 | + * |
| 3 | + * Copyright 2015, Google Inc. |
| 4 | + * All rights reserved. |
| 5 | + * |
| 6 | + * Redistribution and use in source and binary forms, with or without |
| 7 | + * modification, are permitted provided that the following conditions are |
| 8 | + * met: |
| 9 | + * |
| 10 | + * * Redistributions of source code must retain the above copyright |
| 11 | + * notice, this list of conditions and the following disclaimer. |
| 12 | + * * Redistributions in binary form must reproduce the above |
| 13 | + * copyright notice, this list of conditions and the following disclaimer |
| 14 | + * in the documentation and/or other materials provided with the |
| 15 | + * distribution. |
| 16 | + * * Neither the name of Google Inc. nor the names of its |
| 17 | + * contributors may be used to endorse or promote products derived from |
| 18 | + * this software without specific prior written permission. |
| 19 | + * |
| 20 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 23 | + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 24 | + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 25 | + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 26 | + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 27 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 28 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 29 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 30 | + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | + * |
| 32 | + */ |
| 33 | + |
| 34 | +#include "src/core/iomgr/executor.h" |
| 35 | + |
| 36 | +#include <string.h> |
| 37 | + |
| 38 | +#include <grpc/support/alloc.h> |
| 39 | +#include <grpc/support/log.h> |
| 40 | +#include <grpc/support/sync.h> |
| 41 | +#include <grpc/support/thd.h> |
| 42 | +#include "src/core/iomgr/exec_ctx.h" |
| 43 | + |
/** Global executor state: a single, lazily-spawned worker thread draining a
    list of closures. All fields are protected by \a mu. */
typedef struct grpc_executor_data {
  int busy;          /**< is the thread currently running? */
  int shutting_down; /**< has \a grpc_shutdown() been invoked? */
  int pending_join;  /**< has the thread finished but not been joined? */
  grpc_closure_list closures; /**< collection of pending work */
  gpr_thd_id tid; /**< thread id of the thread, only valid if \a busy or \a
                     pending_join are true */
  gpr_thd_options options; /**< thread creation options (set joinable) */
  gpr_mu mu;               /**< guards every field above */
} grpc_executor;
| 54 | + |
| 55 | +static grpc_executor g_executor; |
| 56 | + |
| 57 | +void grpc_executor_init() { |
| 58 | + memset(&g_executor, 0, sizeof(grpc_executor)); |
| 59 | + gpr_mu_init(&g_executor.mu); |
| 60 | + g_executor.options = gpr_thd_options_default(); |
| 61 | + gpr_thd_options_set_joinable(&g_executor.options); |
| 62 | +} |
| 63 | + |
/* thread body */
/* Worker thread: repeatedly pops closures off the shared list and runs them.
   Exits either when shutdown has been requested (remaining closures are then
   run by grpc_executor_shutdown itself) or when the list is drained, in which
   case \a busy is cleared so a later enqueue can respawn a thread. */
static void closure_exec_thread_func(void *ignored) {
  grpc_closure *closure;

  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  while (1) {
    gpr_mu_lock(&g_executor.mu);
    if (g_executor.shutting_down != 0) {
      /* note: \a busy is deliberately left set; shutdown joins us via
       * pending_join rather than relying on busy */
      gpr_mu_unlock(&g_executor.mu);
      break;
    }
    closure = grpc_closure_list_pop(&g_executor.closures);
    if (closure == NULL) {
      /* no more work, time to die */
      GPR_ASSERT(g_executor.busy == 1);
      g_executor.busy = 0;
      gpr_mu_unlock(&g_executor.mu);
      break;
    }
    /* run the closure outside the lock: it may enqueue more work */
    gpr_mu_unlock(&g_executor.mu);
    closure->cb(&exec_ctx, closure->cb_arg, closure->success);
    grpc_exec_ctx_flush(&exec_ctx);
  }
  grpc_exec_ctx_finish(&exec_ctx);
}
| 89 | + |
| 90 | +/* Spawn the thread if new work has arrived a no thread is up */ |
| 91 | +static void maybe_spawn_locked() { |
| 92 | + if (grpc_closure_list_empty(g_executor.closures) == 1) { |
| 93 | + return; |
| 94 | + } |
| 95 | + if (g_executor.shutting_down == 1) { |
| 96 | + return; |
| 97 | + } |
| 98 | + |
| 99 | + if (g_executor.busy != 0) { |
| 100 | + /* Thread still working. New work will be picked up by already running |
| 101 | + * thread. Not spawning anything. */ |
| 102 | + return; |
| 103 | + } else if (g_executor.pending_join != 0) { |
| 104 | + /* Pickup the remains of the previous incarnations of the thread. */ |
| 105 | + gpr_thd_join(g_executor.tid); |
| 106 | + g_executor.pending_join = 0; |
| 107 | + } |
| 108 | + |
| 109 | + /* All previous instances of the thread should have been joined at this point. |
| 110 | + * Spawn time! */ |
| 111 | + g_executor.busy = 1; |
| 112 | + gpr_thd_new(&g_executor.tid, closure_exec_thread_func, NULL, |
| 113 | + &g_executor.options); |
| 114 | + g_executor.pending_join = 1; |
| 115 | +} |
| 116 | + |
| 117 | +void grpc_executor_enqueue(grpc_closure *closure, int success) { |
| 118 | + gpr_mu_lock(&g_executor.mu); |
| 119 | + if (g_executor.shutting_down == 0) { |
| 120 | + grpc_closure_list_add(&g_executor.closures, closure, success); |
| 121 | + maybe_spawn_locked(); |
| 122 | + } |
| 123 | + gpr_mu_unlock(&g_executor.mu); |
| 124 | +} |
| 125 | + |
/** Drain and destroy the global executor.
    Sets \a shutting_down (stopping new enqueues and the worker loop), runs
    any still-pending closures inline, joins the worker thread if one was
    ever spawned, and destroys the mutex. */
void grpc_executor_shutdown() {
  int pending_join;
  grpc_closure *closure;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_mu_lock(&g_executor.mu);
  pending_join = g_executor.pending_join;
  g_executor.shutting_down = 1;
  gpr_mu_unlock(&g_executor.mu);
  /* we can release the lock at this point despite the access to the closure
   * list below because we aren't accepting new work */
  /* NOTE(review): this also assumes the worker thread stops popping once it
   * sees shutting_down, so no concurrent unlocked access to the list can
   * happen here — TODO confirm a worker mid-iteration cannot race this
   * drain loop. */

  /* Execute pending callbacks, some may be performing cleanups */
  while ((closure = grpc_closure_list_pop(&g_executor.closures)) != NULL) {
    closure->cb(&exec_ctx, closure->cb_arg, closure->success);
  }
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
  if (pending_join) {
    /* reap the worker thread so its resources are released */
    gpr_thd_join(g_executor.tid);
  }
  gpr_mu_destroy(&g_executor.mu);
}
0 commit comments