
Bug7263/pmix setup app api v3 #12

Open
wants to merge 7 commits into base: slurm/slurm-20.11

Conversation

karasevb (Owner) commented Dec 6, 2020

No description provided.

Comment on lines +590 to +696
/*
 * Estimate the size of a buffer capable of holding the proc map for this job.
 * PMIx proc map string format:
 *
 *      xx,yy,...,zz;ll,mm,...,nn;...;aa,bb,...,cc;
 *      - n0 ranks -;- n1 ranks -;...;- nX ranks -;
 *
 * To roughly estimate the size of the string we leverage the following
 * dependency: for any rank \in [0; nspace->ntasks - 1]
 *      num_digits_10(rank) <= num_digits_10(nspace->ntasks)
 *
 * So the cumulative number "digits_cnt" of all digits comprising all rank
 * numbers in the namespace is:
 *      digits_cnt <= num_digits_10(nspace->ntasks) * nspace->ntasks
 * Every rank is followed by either a comma, a semicolon, or the terminating
 * '\0', thus each rank requires at most num_digits_10(nspace->ntasks) + 1
 * characters. So we need at most:
 *      (num_digits_10(nspace->ntasks) + 1) * nspace->ntasks
 *
 * Consider a 1,000,000-core system with 64 PPN.
 * The size of the intermediate buffer will be:
 * - num_digits_10(1,000,000) = 7
 * - (7 + 1) * 1,000,000 ~= 8MB
 */
static size_t _proc_map_buffer_size(uint32_t ntasks)
{
        return (pmixp_count_digits_base10(ntasks) + 1) * ntasks;
}

/* Build a sequence of ranks sorted by nodes */
static void _build_node2task_map(uint32_t nnodes, uint32_t ntasks,
                                 uint32_t *task_cnts, uint32_t *task_map,
                                 uint32_t *node2tasks)
{
        uint32_t *node_offs = xcalloc(nnodes, sizeof(*node_offs));
        uint32_t *node_tasks = xcalloc(nnodes, sizeof(*node_tasks));

        /* Build the offsets structure needed to fill the node-to-tasks map */
        for (int i = 1; i < nnodes; i++)
                node_offs[i] = node_offs[i - 1] + task_cnts[i - 1];

        xassert(ntasks == (node_offs[nnodes - 1] + task_cnts[nnodes - 1]));

        /* Fill the node-to-task map */
        for (int i = 0; i < ntasks; i++) {
                int node = task_map[i], offset;
                xassert(node < nnodes);
                offset = node_offs[node] + node_tasks[node]++;
                xassert(task_cnts[node] >= node_tasks[node]);
                node2tasks[offset] = i;
        }

        /* Cleanup service structures */
        xfree(node_offs);
        xfree(node_tasks);
}

char *pmixp_info_get_node_map(hostlist_t hl)
{
        char *input, *regexp;
        int rc;

        input = hostlist_deranged_string_malloc(hl);
        rc = PMIx_generate_regex(input, &regexp);
        free(input);
        if (PMIX_SUCCESS != rc) {
                return NULL;
        }
        return regexp;
}

char *pmixp_info_get_proc_map(hostlist_t hl, uint32_t nnodes,
                              uint32_t ntasks, uint32_t *task_cnts,
                              uint32_t *task_map)
{
        char *regexp, *map = NULL, *pos = NULL;
        uint32_t *node2tasks = NULL, *cur_task = NULL;
        int rc, i, j;
        int count = hostlist_count(hl);

        /* Preallocate the buffer to avoid constant xremalloc() calls. */
        map = xmalloc(_proc_map_buffer_size(ntasks));

        /* Build a node-to-tasks map that can be traversed in O(n) steps */
        node2tasks = xcalloc(ntasks, sizeof(*node2tasks));
        _build_node2task_map(nnodes, ntasks, task_cnts, task_map, node2tasks);
        cur_task = node2tasks;

        for (i = 0; i < nnodes; i++) {
                char *sep = "";
                /* For each node, provide IDs of the tasks residing on it */
                for (j = 0; j < task_cnts[i]; j++) {
                        xstrfmtcatat(map, &pos, "%s%u", sep, *(cur_task++));
                        sep = ",";
                }
                if (i < (count - 1)) {
                        xstrfmtcatat(map, &pos, ";");
                }
        }
        rc = PMIx_generate_ppn(map, &regexp);
        xfree(map);
        xfree(node2tasks);

        if (PMIX_SUCCESS != rc) {
                return NULL;
        }

        return regexp;
}
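As a quick sanity check of the estimate in the comment above, here is a minimal standalone sketch (not part of this patch; it reimplements the digit count locally instead of calling pmixp_count_digits_base10()) that compares the worst-case proc-map length against the bound for the 1,000,000-task example:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for pmixp_count_digits_base10() */
static size_t num_digits_10(uint32_t v)
{
        size_t d = 1;
        while (v >= 10) {
                v /= 10;
                d++;
        }
        return d;
}

int main(void)
{
        uint32_t ntasks = 1000000;      /* the 1M-task example from the comment */
        size_t bound = (num_digits_10(ntasks) + 1) * (size_t)ntasks;
        size_t worst = 0;

        /* Worst case: every rank appears once, each followed by one separator */
        for (uint32_t rank = 0; rank < ntasks; rank++)
                worst += num_digits_10(rank) + 1;

        printf("bound = %zu bytes (~%.1f MB), worst case = %zu bytes\n",
               bound, bound / 1e6, worst);
        assert(worst <= bound);
        return 0;
}

The bound of 8,000,000 bytes covers the worst case of roughly 6.9 MB, so the single xmalloc() in pmixp_info_get_proc_map() is sufficient.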
Move to pmixp_client


}

/*
 * Estimate the size of a buffer capable of holding the proc map for this job.

move to pmixp_client: BEGIN

        }

        return regexp;
}

move to pmixp_client: END


void pmixp_info_set_init(void)
{
        _pmixp_info.initialized = 1;

make this a regular variable.

@@ -55,15 +58,16 @@ static bool _srv_use_direct_conn_ucx = false;
static int _srv_fence_coll_type = PMIXP_COLL_TYPE_FENCE_MAX;
static bool _srv_fence_coll_barrier = false;

pmix_jobinfo_t _pmixp_job_info;
pmixp_info_t _pmixp_info;

Suggested change
-pmixp_info_t _pmixp_info;
+pmixp_info_t _pmixp_info;
+static volatile int _was_initialized = 0;

pmixp_srun_info_t srun;
pmixp_stepd_info_t stepd;
};
volatile int initialized;
Suggested change
-volatile int initialized;
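Taken together with the earlier "make this a regular variable" note, the two suggestions appear to move the initialization flag out of the pmixp_info_t struct and into a file-scope static. A minimal sketch of what pmixp_info_set_init() could then look like, assuming a hypothetical pmixp_info_is_init() accessor that is not part of this diff:

/* Sketch only: assumes the flag becomes a file-scope static as suggested */
static volatile int _was_initialized = 0;

void pmixp_info_set_init(void)
{
        _was_initialized = 1;
}

/* Hypothetical accessor; the actual patch may expose this differently */
int pmixp_info_is_init(void)
{
        return _was_initialized;
}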
