From 48078a30e60258a023e04dad282c51d3ec14b2cc Mon Sep 17 00:00:00 2001
From: Hadi Moshayedi
Date: Mon, 23 Sep 2019 14:08:31 -0700
Subject: [PATCH] Fix wait_until_metadata_sync() for postgres 12.

Postgres 12 now asserts that calls to WaitLatchOrSocket handle
postmaster death.
---
 src/backend/distributed/test/metadata_sync.c        | 12 +++++++++---
 src/test/regress/expected/local_shard_execution.out |  4 +++-
 .../regress/expected/local_shard_execution_0.out    |  4 +++-
 src/test/regress/sql/local_shard_execution.sql      |  4 +++-
 4 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/src/backend/distributed/test/metadata_sync.c b/src/backend/distributed/test/metadata_sync.c
index 123a5492b..f571b2ee5 100644
--- a/src/backend/distributed/test/metadata_sync.c
+++ b/src/backend/distributed/test/metadata_sync.c
@@ -84,6 +84,7 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
 	ListCell *workerCell = NULL;
 	bool waitNotifications = false;
 	MultiConnection *connection = NULL;
+	int waitFlags = 0;
 
 	foreach(workerCell, workerList)
 	{
@@ -112,9 +113,14 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
 								   "localhost", PostPortNumber);
 	ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL);
 
-	waitResult = WaitLatchOrSocket(NULL, WL_SOCKET_READABLE | WL_TIMEOUT,
-								   PQsocket(connection->pgConn), timeout, 0);
-	if (waitResult & WL_SOCKET_MASK)
+	waitFlags = WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH;
+	waitResult = WaitLatchOrSocket(NULL, waitFlags, PQsocket(connection->pgConn),
+								   timeout, 0);
+	if (waitResult & WL_POSTMASTER_DEATH)
+	{
+		ereport(ERROR, (errmsg("postmaster was shut down, exiting")));
+	}
+	else if (waitResult & WL_SOCKET_MASK)
 	{
 		ClearResults(connection, true);
 	}
diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out
index 00ed9a8a5..7f3bbb3ee 100644
--- a/src/test/regress/expected/local_shard_execution.out
+++ b/src/test/regress/expected/local_shard_execution.out
@@ -705,7 +705,9 @@ SELECT
 FROM
 	distributed_table, all_data
 WHERE
-	distributed_table.key = all_data.key AND distributed_table.key = 1 AND EXISTS (SELECT * FROM all_data);
+	distributed_table.key = all_data.key AND distributed_table.key = 1
+	-- the following is to avoid CTE inlining
+	AND EXISTS (SELECT * FROM all_data);
  count
-------
      1
diff --git a/src/test/regress/expected/local_shard_execution_0.out b/src/test/regress/expected/local_shard_execution_0.out
index ec0c44e01..b37e37a9a 100644
--- a/src/test/regress/expected/local_shard_execution_0.out
+++ b/src/test/regress/expected/local_shard_execution_0.out
@@ -691,7 +691,9 @@ SELECT
 FROM
 	distributed_table, all_data
 WHERE
-	distributed_table.key = all_data.key AND distributed_table.key = 1 AND EXISTS (SELECT * FROM all_data);
+	distributed_table.key = all_data.key AND distributed_table.key = 1
+	-- the following is to avoid CTE inlining
+	AND EXISTS (SELECT * FROM all_data);
  count
-------
      1
diff --git a/src/test/regress/sql/local_shard_execution.sql b/src/test/regress/sql/local_shard_execution.sql
index 72e36bccd..7cd7e4027 100644
--- a/src/test/regress/sql/local_shard_execution.sql
+++ b/src/test/regress/sql/local_shard_execution.sql
@@ -425,7 +425,9 @@ SELECT
 FROM
 	distributed_table, all_data
 WHERE
-	distributed_table.key = all_data.key AND distributed_table.key = 1 AND EXISTS (SELECT * FROM all_data);
+	distributed_table.key = all_data.key AND distributed_table.key = 1
+	-- the following is to avoid CTE inlining
+	AND EXISTS (SELECT * FROM all_data);
 
 -- get ready for the next commands
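
For reference, a minimal sketch (not part of the patch) of the two ways a Postgres 12
caller can satisfy the new assertion: request WL_POSTMASTER_DEATH and check the result
bit, as this patch does, or pass WL_EXIT_ON_PM_DEATH so the latch code exits the process
itself. The function name wait_for_pg12_socket and the parameters sock and timeout_ms
are made-up placeholders.

/*
 * Sketch only: assumes a caller-provided socket ("sock") and timeout in
 * milliseconds ("timeout_ms"); these names are placeholders, not part of
 * the patch.
 */
#include "postgres.h"

#include "storage/latch.h"

static int
wait_for_pg12_socket(pgsocket sock, long timeout_ms)
{
	int rc;

	/* Option 1: ask for WL_POSTMASTER_DEATH and handle it, as the patch does. */
	rc = WaitLatchOrSocket(NULL,
						   WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH,
						   sock, timeout_ms, 0);
	if (rc & WL_POSTMASTER_DEATH)
	{
		ereport(ERROR, (errmsg("postmaster was shut down, exiting")));
	}

	/* Option 2 (alternative): WL_EXIT_ON_PM_DEATH makes the latch code exit for us. */
	rc = WaitLatchOrSocket(NULL,
						   WL_SOCKET_READABLE | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
						   sock, timeout_ms, 0);

	return rc;
}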