Skip to content

Commit

Permalink
Merge branch 'adb-6.x-dev' into ADBDEV-4486
Browse files Browse the repository at this point in the history
  • Loading branch information
andr-sokolov authored Jan 24, 2024
2 parents 81ff88b + 85fb13c commit 83fe977
Show file tree
Hide file tree
Showing 16 changed files with 2,616 additions and 204 deletions.
1 change: 1 addition & 0 deletions .abi-check/6.26.0/postgres.symbols.ignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
ConfigureNamesBool_gp
22 changes: 17 additions & 5 deletions .github/workflows/greenplum-abi-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,15 @@ jobs:
BASELINE_VERSION: ${{ steps.vars.outputs.BASELINE_VERSION }}
ABI_LIBS: ${{ steps.vars.outputs.ABI_LIBS }}
ABI_HEADERS: ${{ steps.vars.outputs.ABI_HEADERS }}
EXCEPTION_LISTS_COUNT: ${{ steps.check_exception_lists.outputs.EXCEPTION_LISTS_COUNT }}
steps:
- name: Fetch source
uses: actions/checkout@v3

- name: Get Greenplum version variables
id: vars
run: |
remote_repo='https://github.com/greenplum-db/gpdb.git'
remote_repo='https://github.com/arenadata/gpdb.git'
git ls-remote --tags --refs --sort='v:refname' $remote_repo '6.*' | tail -n 1 > baseline_version_ref
baseline_ref=$(cat baseline_version_ref | awk '{print $1}')
baseline_version=$(cat baseline_version_ref | awk '{print $2}')
Expand All @@ -44,7 +45,14 @@ jobs:
echo "ABI_LIBS=postgres" | tee -a $GITHUB_OUTPUT
echo "ABI_HEADERS=." | tee -a $GITHUB_OUTPUT
- name: Check if exception list exists
id: check_exception_lists
run: |
exception_lists_count=$(ls .abi-check/${{ steps.vars.outputs.BASELINE_VERSION }}/ 2> /dev/null | wc -l)
echo "EXCEPTION_LISTS_COUNT=${exception_lists_count}" | tee -a $GITHUB_OUTPUT
- name: Upload symbol/type checking exception list
if: steps.check_exception_lists.outputs.EXCEPTION_LISTS_COUNT != '0'
uses: actions/upload-artifact@v3
with:
name: exception_lists
Expand All @@ -53,15 +61,15 @@ jobs:
abi-dump:
needs: abi-dump-setup
runs-on: ubuntu-latest
container: gcr.io/data-gpdb-public-images/gpdb6-rocky8-build
container: gcr.io/data-gpdb-public-images/gpdb6-centos7-build
strategy:
matrix:
name:
- build-baseline
- build-latest
include:
- name: build-baseline
repo: greenplum-db/gpdb
repo: arenadata/gpdb
ref: ${{ needs.abi-dump-setup.outputs.BASELINE_VERSION }}
- name: build-latest
repo: ${{ github.repository }}
Expand All @@ -76,6 +84,8 @@ jobs:
tar -xf uctags-2023.07.05-linux-x86_64.tar.xz
cp uctags-2023.07.05-linux-x86_64/bin/* /usr/bin/
which ctags
yum install -y https://packages.endpointdev.com/rhel/7/os/x86_64/endpoint-repo.x86_64.rpm
yum install -y git
- name: Download Greenplum source code
uses: actions/checkout@v3
Expand All @@ -90,6 +100,7 @@ jobs:
run: |
yum install -y epel-release
yum install -y abi-dumper
yum install -y libzstd-static
- name: Build Greenplum
run: |
Expand Down Expand Up @@ -122,7 +133,7 @@ jobs:
- abi-dump-setup
- abi-dump
runs-on: ubuntu-latest
container: gcr.io/data-gpdb-public-images/gpdb6-rocky8-build
container: gcr.io/data-gpdb-public-images/gpdb6-centos7-build
steps:
- name: Download baseline
uses: actions/download-artifact@v3
Expand All @@ -136,6 +147,7 @@ jobs:
path: build-latest/

- name: Download exception lists
if: needs.abi-dump-setup.outputs.EXCEPTION_LISTS_COUNT != '0'
uses: actions/download-artifact@v3
with:
name: exception_lists
Expand All @@ -145,7 +157,7 @@ jobs:
run: |
yum install -y epel-release
yum install -y abi-compliance-checker
yum install -y --enablerepo=powertools lynx
yum install -y lynx
- name: Compare ABI
run: |
Expand Down
47 changes: 4 additions & 43 deletions gpAux/gpperfmon/src/gpmon/gpmon_agg.c
Original file line number Diff line number Diff line change
Expand Up @@ -81,8 +81,6 @@ extern mmon_options_t opt;
extern apr_queue_t* message_queue;

extern void incremement_tail_bytes(apr_uint64_t bytes);
static bool is_query_not_active(apr_int32_t tmid, apr_int32_t ssid,
apr_int32_t ccnt, apr_hash_t *hash, apr_pool_t *pool);

/**
* Disk space check helper function
Expand Down Expand Up @@ -167,43 +165,6 @@ static apr_status_t check_disk_space(mmon_fsinfo_t* rec)
return 0;
}

/*
 * is_query_not_active
 *
 * Decide whether the query identified by (tmid, ssid, ccnt) is no longer the
 * active query of session `ssid`.  Returns true when the query should be
 * treated as orphaned/inactive, false when it still matches the session's
 * current activity.
 *
 * hash: maps session id (formatted as a decimal string) to the text of that
 *       session's currently active query — presumably built from
 *       pg_stat_activity by get_active_queries(); TODO confirm with caller.
 * pool: APR pool used for all temporary allocations (key string, query text).
 */
static bool is_query_not_active(apr_int32_t tmid, apr_int32_t ssid, apr_int32_t ccnt, apr_hash_t *hash, apr_pool_t *pool)
{
// Look up the session's currently active query text.  A missing entry means
// the session has no active query, so this one is orphaned.
char *key = apr_psprintf(pool, "%d", ssid);
char *active_query = apr_hash_get(hash, key, APR_HASH_KEY_STRING);
if (active_query == NULL)
{
TR0(("Found orphan query, tmid:%d, ssid:%d, ccnt:%d\n", tmid, ssid, ccnt));
return true;
}

// Read the query text recorded in the q file for (tmid, ssid, ccnt).
// A read failure is treated the same as an orphaned query.
char *query = get_query_text(tmid, ssid, ccnt, pool);
if (query == NULL)
{
TR0(("Found error while reading query text in file '%sq%d-%d-%d.txt'\n", GPMON_DIR, tmid, ssid, ccnt));
return true;
}
// If the session's (ssid) current active query is not the same as the one we
// are checking, we assume q(tmid)-(ssid)-(ccnt).txt carries a stale status.
// This works around a bug in execMain.c that is too hard to fix there.
// NOTE(review): only the first MAX_QUERY_COMPARE_LENGTH bytes of the active
// query are compared, so a recorded query that merely extends the active one
// still counts as a match.
int qlen = strlen(active_query);
if (qlen > MAX_QUERY_COMPARE_LENGTH)
{
qlen = MAX_QUERY_COMPARE_LENGTH;
}
int res = strncmp(query, active_query, qlen);
if (res != 0)
{
TR0(("Found orphan query, tmid:%d, ssid:%d, ccnt:%d\n", tmid, ssid, ccnt));
return true;
}

return false;
}

static apr_status_t agg_put_fsinfo(agg_t* agg, const gpmon_fsinfo_t* met)
{
mmon_fsinfo_t* rec;
Expand Down Expand Up @@ -481,8 +442,8 @@ apr_status_t agg_dup(agg_t** retagg, agg_t* oldagg, apr_pool_t* parent_pool, apr
return e;
}

apr_hash_t *active_query_tab = get_active_queries(newagg->pool);
if (! active_query_tab)
apr_hash_t *active_session_set = get_active_sessions(newagg->pool);
if (!active_session_set)
{
agg_destroy(newagg);
return APR_EINVAL;
Expand All @@ -508,8 +469,8 @@ apr_status_t agg_dup(agg_t** retagg, agg_t* oldagg, apr_pool_t* parent_pool, apr
if ( (status != GPMON_QLOG_STATUS_SUBMIT
&& status != GPMON_QLOG_STATUS_CANCELING
&& status != GPMON_QLOG_STATUS_START)
|| ((age % 5 == 0) /* don't call is_query_not_active every time because it's expensive */
&& is_query_not_active(dp->qlog.key.tmid, dp->qlog.key.ssid, dp->qlog.key.ccnt, active_query_tab, newagg->pool)))
|| apr_hash_get(active_session_set, &dp->qlog.key.ssid,
sizeof(dp->qlog.key.ssid)) == NULL)
{
if (0 != strcmp(dp->qlog.db, GPMON_DB))
{
Expand Down
26 changes: 12 additions & 14 deletions gpAux/gpperfmon/src/gpmon/gpmondb.c
Original file line number Diff line number Diff line change
Expand Up @@ -1335,24 +1335,22 @@ static void convert_tuples_to_hash(PGresult *result, apr_hash_t *hash, apr_pool_
int i = 0;
for (; i < rowcount; i++)
{
char* sessid = PQgetvalue(result, i, 0);
char* query = PQgetvalue(result, i, 1);

char *sessid_copy = apr_pstrdup(pool, sessid);
char *query_copy = apr_pstrdup(pool, query);
if (sessid_copy == NULL || query_copy == NULL)
apr_int32_t* ssid = apr_palloc(pool, sizeof(apr_int32_t));
if (ssid == NULL)
{
gpmon_warning(FLINE, "Out of memory");
continue;
}
apr_hash_set(hash, sessid_copy, APR_HASH_KEY_STRING, query_copy);
*ssid = atoi(PQgetvalue(result, i, 0));

apr_hash_set(hash, ssid, sizeof(apr_int32_t), "");
}
}

apr_hash_t *get_active_queries(apr_pool_t *pool)
apr_hash_t *get_active_sessions(apr_pool_t *pool)
{
PGresult *result = NULL;
apr_hash_t *active_query_tab = NULL;
apr_hash_t *active_session_set = NULL;

PGconn *conn = PQconnectdb(GPDB_CONNECTION_STRING);
if (PQstatus(conn) != CONNECTION_OK)
Expand All @@ -1366,29 +1364,29 @@ apr_hash_t *get_active_queries(apr_pool_t *pool)
return NULL;
}

const char *qry= "SELECT sess_id, query FROM pg_stat_activity;";
const char *qry= "SELECT sess_id FROM pg_stat_activity;";
const char *errmsg = gpdb_exec_only(conn, &result, qry);
if (errmsg)
{
gpmon_warning(FLINE, "check query status failed : %s", errmsg);
}
else
{
active_query_tab = apr_hash_make(pool);
if (! active_query_tab)
active_session_set = apr_hash_make(pool);
if (!active_session_set)
{
gpmon_warning(FLINE, "Out of memory");
}
else
{
convert_tuples_to_hash(result, active_query_tab, pool);
convert_tuples_to_hash(result, active_session_set, pool);
}
}

PQclear(result);
PQfinish(conn);

return active_query_tab;
return active_session_set;
}

const char *iconv_encodings[] = {
Expand Down
2 changes: 1 addition & 1 deletion gpAux/gpperfmon/src/gpmon/gpmondb.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ APR_DECLARE (apr_status_t) gpdb_harvest_one(const char* table);

APR_DECLARE (apr_status_t) remove_segid_constraint(void);

APR_DECLARE (apr_hash_t *) get_active_queries(apr_pool_t* pool);
APR_DECLARE (apr_hash_t *) get_active_sessions(apr_pool_t* pool);

APR_DECLARE (void) create_log_alert_table(void);

Expand Down
11 changes: 11 additions & 0 deletions gpMgmt/test/behave/mgmt_utils/gpperfmon.feature
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,17 @@ Feature: gpperfmon
Then wait until the results from boolean sql "SELECT count(*) = 0 FROM queries_history WHERE query_text like '--alter distributed by%'" is "true"
And wait until the results from boolean sql "SELECT count(*) = 1 FROM queries_history WHERE query_text like '--end flag%'" is "true"

@gpperfmon_query_history
Scenario: gpperfmon does not lose the query text if its text differs from the text in pg_stat_activity
Given gpperfmon is configured and running in qamode
When the user truncates "queries_history" tables in "gpperfmon"
When below sql is executed in "gptest" db
"""
SET log_min_messages = "debug4";
DO $$ BEGIN PERFORM pg_sleep(80); END$$;
"""
Then wait until the results from boolean sql "SELECT count(*) > 0 FROM queries_history WHERE query_text = 'SELECT pg_sleep(80)'" is "true"

@gpperfmon_system_history
Scenario: gpperfmon adds to system_history table
Given gpperfmon is configured and running in qamode
Expand Down
25 changes: 21 additions & 4 deletions src/backend/cdb/cdbllize.c
Original file line number Diff line number Diff line change
Expand Up @@ -590,8 +590,16 @@ ParallelizeCorrelatedSubPlanMutator(Node *node, ParallelizeCorrelatedPlanWalkerC
scanPlan->flow->flotype = FLOW_SINGLETON;
}

broadcastPlan(scanPlan, false /* stable */ , false /* rescannable */,
ctx->currentPlanFlow->numsegments /* numsegments */);
/*
* Broadcasting Replicated locus leads to data duplicates.
*/
if (scanPlan->flow->locustype == CdbLocusType_Replicated &&
scanPlan->flow->numsegments != ctx->currentPlanFlow->numsegments)
elog(ERROR, "could not parallelize SubPlan");

if (scanPlan->flow->locustype != CdbLocusType_Replicated)
broadcastPlan(scanPlan, false /* stable */ , false /* rescannable */ ,
ctx->currentPlanFlow->numsegments /* numsegments */ );
}
else
{
Expand Down Expand Up @@ -758,8 +766,17 @@ ParallelizeSubplan(SubPlan *spExpr, PlanProfile *context)
if (containingPlanDistributed)
{
Assert(NULL != context->currentPlanFlow);
broadcastPlan(newPlan, false /* stable */ , false /* rescannable */,
context->currentPlanFlow->numsegments /* numsegments */);

/*
* Broadcasting Replicated locus leads to data duplicates.
*/
if (newPlan->flow->locustype == CdbLocusType_Replicated &&
newPlan->flow->numsegments != context->currentPlanFlow->numsegments)
elog(ERROR, "could not parallelize SubPlan");

if (newPlan->flow->locustype != CdbLocusType_Replicated)
broadcastPlan(newPlan, false /* stable */ , false /* rescannable */,
context->currentPlanFlow->numsegments /* numsegments */);
}
else
{
Expand Down
3 changes: 2 additions & 1 deletion src/backend/cdb/cdbmutate.c
Original file line number Diff line number Diff line change
Expand Up @@ -621,7 +621,8 @@ apply_motion(PlannerInfo *root, Plan *plan, Query *query)
{
if ((plan->flow->flotype == FLOW_PARTITIONED ||
(plan->flow->flotype == FLOW_SINGLETON &&
plan->flow->locustype == CdbLocusType_SegmentGeneral)) &&
plan->flow->locustype == CdbLocusType_SegmentGeneral) ||
plan->flow->flotype == FLOW_REPLICATED) &&
!root->glob->is_parallel_cursor)
bringResultToDispatcher = true;

Expand Down
Loading

0 comments on commit 83fe977

Please sign in to comment.