Bug7263/pmix setup app api v3 #12
base: slurm/slurm-20.11
Conversation
This function should help to check the max size of the env value before appending the new one.
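For context, a minimal sketch of the kind of size check the comment describes; the constant and helper name below are hypothetical and not part of this patch:

#include <stdlib.h>
#include <string.h>

/* Hypothetical cap on a single environment value; a real limit would come
 * from system/PMIx constraints, not from this constant. */
#define ENV_VAL_MAX_GUESS (64 * 1024)

/* Append name=value to the environment only if the value fits the cap. */
static int _setenv_if_fits(const char *name, const char *value)
{
	if (strlen(value) + 1 > ENV_VAL_MAX_GUESS)
		return -1;	/* too large; caller must handle the failure */
	return setenv(name, value, 1);
}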
/*
 * Estimate the size of a buffer capable of holding the proc map for this job.
 * PMIx proc map string format:
 *
 *    xx,yy,...,zz;ll,mm,...,nn;...;aa,bb,...,cc;
 *    - n0 ranks -;- n1 ranks -;...;- nX ranks -;
 *
 * To roughly estimate the size of the string we leverage the following
 * dependency: for any rank \in [0; nspace->ntasks - 1]
 *     num_digits_10(rank) <= num_digits_10(nspace->ntasks)
 *
 * So we can say that the cumulative number "digits_cnt" of all symbols
 * comprising all rank numbers in the namespace is:
 *     digits_size <= num_digits_10(nspace->ntasks) * nspace->ntasks
 * Every rank is followed either by a comma, a semicolon, or the terminating
 * '\0', thus each rank requires at most num_digits_10(nspace->ntasks) + 1.
 * So we need at most: (num_digits_10(nspace->ntasks) + 1) * nspace->ntasks.
 *
 * Considering a 1.000.000 core system with 64PPN,
 * the size of the intermediate buffer will be:
 * - num_digits_10(1.000.000) = 7
 * - (7 + 1) * 1.000.000 ~= 8MB
 */
static size_t _proc_map_buffer_size(uint32_t ntasks)
{
	return (pmixp_count_digits_base10(ntasks) + 1) * ntasks;
}
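For reference, a digit counter consistent with how _proc_map_buffer_size() uses pmixp_count_digits_base10() could look like the sketch below; the real helper is defined elsewhere in the patch, so this is only illustrative:

/* Count the base-10 digits needed to print v (at least 1 for v == 0). */
static int _count_digits_base10(uint32_t v)
{
	int digits = 1;

	while (v >= 10) {
		v /= 10;
		digits++;
	}
	return digits;
}

With v = 1000000 this returns 7, matching the ~8MB estimate in the comment above.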
/* Build a sequence of ranks sorted by nodes */
static void _build_node2task_map(uint32_t nnodes, uint32_t ntasks,
				 uint32_t *task_cnts, uint32_t *task_map,
				 uint32_t *node2tasks)
{
	uint32_t *node_offs = xcalloc(nnodes, sizeof(*node_offs));
	uint32_t *node_tasks = xcalloc(nnodes, sizeof(*node_tasks));

	/* Build the offsets structure needed to fill the node-to-tasks map */
	for (int i = 1; i < nnodes; i++)
		node_offs[i] = node_offs[i - 1] + task_cnts[i - 1];

	xassert(ntasks == (node_offs[nnodes - 1] + task_cnts[nnodes - 1]));

	/* Fill the node-to-task map */
	for (int i = 0; i < ntasks; i++) {
		int node = task_map[i], offset;
		xassert(node < nnodes);
		offset = node_offs[node] + node_tasks[node]++;
		xassert(task_cnts[node] >= node_tasks[node]);
		node2tasks[offset] = i;
	}

	/* Cleanup service structures */
	xfree(node_offs);
	xfree(node_tasks);
}
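As a small worked example (a made-up layout, not from the patch): with nnodes = 2, ntasks = 5, task_cnts = {3, 2} and task_map = {0, 1, 0, 1, 0}, the offsets become node_offs = {0, 3}, and the fill loop produces node2tasks = {0, 2, 4, 1, 3} — node 0's ranks first, then node 1's.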
char *pmixp_info_get_node_map(hostlist_t hl)
{
	char *input, *regexp;
	int rc;

	input = hostlist_deranged_string_malloc(hl);
	rc = PMIx_generate_regex(input, &regexp);
	free(input);
	if (PMIX_SUCCESS != rc) {
		return NULL;
	}
	return regexp;
}
char *pmixp_info_get_proc_map(hostlist_t hl, uint32_t nnodes,
			      uint32_t ntasks, uint32_t *task_cnts,
			      uint32_t *task_map)
{
	char *regexp, *map = NULL, *pos = NULL;
	uint32_t *node2tasks = NULL, *cur_task = NULL;
	int rc, i, j;
	int count = hostlist_count(hl);

	/* Preallocate the buffer to avoid constant xremalloc() calls. */
	map = xmalloc(_proc_map_buffer_size(ntasks));

	/* Build a node-to-tasks map that can be traversed in O(n) steps */
	node2tasks = xcalloc(ntasks, sizeof(*node2tasks));
	_build_node2task_map(nnodes, ntasks, task_cnts, task_map, node2tasks);
	cur_task = node2tasks;

	for (i = 0; i < nnodes; i++) {
		char *sep = "";
		/* For each node, provide IDs of the tasks residing on it */
		for (j = 0; j < task_cnts[i]; j++) {
			xstrfmtcatat(map, &pos, "%s%u", sep, *(cur_task++));
			sep = ",";
		}
		if (i < (count - 1)) {
			xstrfmtcatat(map, &pos, ";");
		}
	}
	rc = PMIx_generate_ppn(map, &regexp);
	xfree(map);
	xfree(node2tasks);

	if (PMIX_SUCCESS != rc) {
		return NULL;
	}

	return regexp;
}
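Continuing the small example above, the nested loops emit the string "0,2,4;1,3" (ranks per node, comma-separated, with nodes separated by semicolons), which PMIx_generate_ppn() then compresses into the regex form that PMIx expects.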
Move to pmixp_client
}

/*
 * Estimate the size of a buffer capable of holding the proc map for this job.
move to pmixp_client: BEGIN
	}

	return regexp;
}
move to pmixp_client: END
void pmixp_info_set_init(void)
{
	_pmixp_info.initialized = 1;
make this a regular variable.
@@ -55,15 +58,16 @@ static bool _srv_use_direct_conn_ucx = false;
static int _srv_fence_coll_type = PMIXP_COLL_TYPE_FENCE_MAX;
static bool _srv_fence_coll_barrier = false;

pmix_jobinfo_t _pmixp_job_info;
pmixp_info_t _pmixp_info;
Suggested change:
-pmixp_info_t _pmixp_info;
+pmixp_info_t _pmixp_info;
+static volatile int _was_initialized = 0;
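A minimal sketch of how the flag-based approach from this suggestion could be wired up; the accessor name is hypothetical and only illustrates the idea:

static volatile int _was_initialized = 0;

void pmixp_info_set_init(void)
{
	_was_initialized = 1;
}

/* Hypothetical accessor; the real code may simply test the flag in place. */
int pmixp_info_was_initialized(void)
{
	return _was_initialized;
}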
pmixp_srun_info_t srun;
pmixp_stepd_info_t stepd;
};
volatile int initialized;
Suggested change (delete the line):
-volatile int initialized;
No description provided.