diff --git a/.devcontainer/.psqlrc b/.devcontainer/.psqlrc index 7642a9714..07ea06cdd 100644 --- a/.devcontainer/.psqlrc +++ b/.devcontainer/.psqlrc @@ -3,5 +3,5 @@ \pset border 2 \setenv PAGER 'pspg --no-mouse -bX --no-commandbar --no-topbar' \set HISTSIZE 100000 -\set PROMPT1 '\n%[%033[1m%]%M %n@%/:%>-%p%R%[%033[0m%]%# ' +\set PROMPT1 '\n%[%033[1m%]%M %n@%/:%> (PID: %p)%R%[%033[0m%]%# ' \set PROMPT2 ' ' diff --git a/.devcontainer/.vscode/launch.json b/.devcontainer/.vscode/launch.json index 290f6573a..6de90ce09 100644 --- a/.devcontainer/.vscode/launch.json +++ b/.devcontainer/.vscode/launch.json @@ -16,5 +16,25 @@ } ], }, - ] + { + "name": "Open core file", + "type": "cppdbg", + "request": "launch", + "program": "/home/citus/.pgenv/pgsql/bin/postgres", + "coreDumpPath": "${input:corefile}", + "cwd": "${workspaceFolder}", + "MIMode": "gdb", + } + ], + "inputs": [ + { + "id": "corefile", + "type": "command", + "command": "extension.commandvariable.file.pickFile", + "args": { + "dialogTitle": "Select core file", + "include": "**/core*", + }, + }, + ], } diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 11fb010b7..9c0b011f0 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -68,7 +68,7 @@ USER citus # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions FROM base AS pg14 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.15 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg15 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.10 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg16 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.6 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -152,6 +152,7 @@ RUN sudo apt update \ lsof \ man \ net-tools \ + psmisc \ pspg \ tree \ vim \ @@ -210,7 +211,7 @@ COPY --chown=citus:citus .psqlrc . 
RUN sudo chown --from=root:root citus:citus -R ~ # sets default pg version -RUN pgenv switch 16.1 +RUN pgenv switch 16.6 # make connecting to the coordinator easy ENV PGPORT=9700 diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 58c9e07a8..cddfcebf4 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,8 +2,11 @@ "image": "ghcr.io/citusdata/citus-devcontainer:main", "runArgs": [ "--cap-add=SYS_PTRACE", + "--ulimit=core=-1", + ], + "forwardPorts": [ + 9700 ], - "forwardPorts": [9700], "customizations": { "vscode": { "extensions": [ @@ -14,6 +17,7 @@ "github.vscode-pull-request-github", "ms-vscode.cpptools-extension-pack", "ms-vsliveshare.vsliveshare", + "rioj7.command-variable", ], "settings": { "files.exclude": { @@ -30,3 +34,4 @@ "updateContentCommand": "./configure", "postCreateCommand": "make -C .devcontainer/", } + diff --git a/.devcontainer/src/test/regress/Pipfile b/.devcontainer/src/test/regress/Pipfile index d4b2cc39f..8811bbd8c 100644 --- a/.devcontainer/src/test/regress/Pipfile +++ b/.devcontainer/src/test/regress/Pipfile @@ -5,7 +5,7 @@ verify_ssl = true [packages] mitmproxy = {editable = true, ref = "main", git = "https://github.com/citusdata/mitmproxy.git"} -construct = "==2.9.45" +construct = "*" docopt = "==0.6.2" cryptography = ">=41.0.4" pytest = "*" @@ -16,6 +16,7 @@ pytest-timeout = "*" pytest-xdist = "*" pytest-repeat = "*" pyyaml = "*" +werkzeug = "==2.3.7" [dev-packages] black = "*" diff --git a/.devcontainer/src/test/regress/Pipfile.lock b/.devcontainer/src/test/regress/Pipfile.lock index bdb42a1c3..fb82a6573 100644 --- a/.devcontainer/src/test/regress/Pipfile.lock +++ b/.devcontainer/src/test/regress/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "b92bf682aeeea1a66a16beaf78584a5318fd0ae908ce85c7e2a4807aa2bee532" + "sha256": "f8db86383082539f626f1402e720f5f2e3f9718b44a8f26110cf9f52e7ca46bc" }, "pipfile-spec": 6, "requires": { @@ -119,11 +119,11 @@ }, "certifi": { "hashes": [ - "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082", - "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9" + "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f", + "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1" ], "markers": "python_version >= '3.6'", - "version": "==2023.7.22" + "version": "==2024.2.2" }, "cffi": { "hashes": [ @@ -180,7 +180,7 @@ "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" ], - "markers": "python_version >= '3.8'", + "markers": "platform_python_implementation != 'PyPy'", "version": "==1.16.0" }, "click": { @@ -193,40 +193,51 @@ }, "construct": { "hashes": [ - "sha256:2271a0efd0798679dea825ff47e22a4c550456a5db0ba8baa82f7eae0af0118c" + "sha256:4d2472f9684731e58cc9c56c463be63baa1447d674e0d66aeb5627b22f512c29", + "sha256:c80be81ef595a1a821ec69dc16099550ed22197615f4320b57cc9ce2a672cb30" ], "index": "pypi", - "version": "==2.9.45" + "markers": "python_version >= '3.6'", + "version": "==2.10.70" }, "cryptography": { "hashes": [ - "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67", - "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311", - "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8", - "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13", - 
"sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143", - "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f", - "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829", - "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd", - "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397", - "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac", - "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d", - "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a", - "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839", - "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e", - "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6", - "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9", - "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860", - "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca", - "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91", - "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d", - "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714", - "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb", - "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f" + "sha256:04859aa7f12c2b5f7e22d25198ddd537391f1695df7057c8700f71f26f47a129", + "sha256:069d2ce9be5526a44093a0991c450fe9906cdf069e0e7cd67d9dee49a62b9ebe", + "sha256:0d3ec384058b642f7fb7e7bff9664030011ed1af8f852540c76a1317a9dd0d20", + "sha256:0fab2a5c479b360e5e0ea9f654bcebb535e3aa1e493a715b13244f4e07ea8eec", + "sha256:0fea01527d4fb22ffe38cd98951c9044400f6eff4788cf52ae116e27d30a1ba3", + "sha256:1b797099d221df7cce5ff2a1d272761d1554ddf9a987d3e11f6459b38cd300fd", + "sha256:1e935c2900fb53d31f491c0de04f41110351377be19d83d908c1fd502ae8daa5", + "sha256:20100c22b298c9eaebe4f0b9032ea97186ac2555f426c3e70670f2517989543b", + "sha256:20180da1b508f4aefc101cebc14c57043a02b355d1a652b6e8e537967f1e1b46", + "sha256:25b09b73db78facdfd7dd0fa77a3f19e94896197c86e9f6dc16bce7b37a96504", + "sha256:2619487f37da18d6826e27854a7f9d4d013c51eafb066c80d09c63cf24505306", + "sha256:2eb6368d5327d6455f20327fb6159b97538820355ec00f8cc9464d617caecead", + "sha256:35772a6cffd1f59b85cb670f12faba05513446f80352fe811689b4e439b5d89e", + "sha256:39d5c93e95bcbc4c06313fc6a500cee414ee39b616b55320c1904760ad686938", + "sha256:3d96ea47ce6d0055d5b97e761d37b4e84195485cb5a38401be341fabf23bc32a", + "sha256:4dcab7c25e48fc09a73c3e463d09ac902a932a0f8d0c568238b3696d06bf377b", + "sha256:5fbf0f3f0fac7c089308bd771d2c6c7b7d53ae909dce1db52d8e921f6c19bb3a", + "sha256:6c25e1e9c2ce682d01fc5e2dde6598f7313027343bd14f4049b82ad0402e52cd", + "sha256:762f3771ae40e111d78d77cbe9c1035e886ac04a234d3ee0856bf4ecb3749d54", + "sha256:90147dad8c22d64b2ff7331f8d4cddfdc3ee93e4879796f837bdbb2a0b141e0c", + "sha256:935cca25d35dda9e7bd46a24831dfd255307c55a07ff38fd1a92119cffc34857", + "sha256:93fbee08c48e63d5d1b39ab56fd3fdd02e6c2431c3da0f4edaf54954744c718f", + "sha256:9541c69c62d7446539f2c1c06d7046aef822940d248fa4b8962ff0302862cc1f", + "sha256:c23f03cfd7d9826cdcbad7850de67e18b4654179e01fe9bc623d37c2638eb4ef", + "sha256:c3d1f5a1d403a8e640fa0887e9f7087331abb3f33b0f2207d2cc7f213e4a864c", + "sha256:d1998e545081da0ab276bcb4b33cce85f775adb86a516e8f55b3dac87f469548", + "sha256:d5cf11bc7f0b71fb71af26af396c83dfd3f6eed56d4b6ef95d57867bf1e4ba65", + 
"sha256:db0480ffbfb1193ac4e1e88239f31314fe4c6cdcf9c0b8712b55414afbf80db4", + "sha256:de4ae486041878dc46e571a4c70ba337ed5233a1344c14a0790c4c4be4bbb8b4", + "sha256:de5086cd475d67113ccb6f9fae6d8fe3ac54a4f9238fd08bfdb07b03d791ff0a", + "sha256:df34312149b495d9d03492ce97471234fd9037aa5ba217c2a6ea890e9166f151", + "sha256:ead69ba488f806fe1b1b4050febafdbf206b81fa476126f3e16110c818bac396" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==41.0.4" + "version": "==42.0.3" }, "docopt": { "hashes": [ @@ -237,11 +248,11 @@ }, "exceptiongroup": { "hashes": [ - "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9", - "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3" + "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14", + "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68" ], "markers": "python_version < '3.11'", - "version": "==1.1.3" + "version": "==1.2.0" }, "execnet": { "hashes": [ @@ -253,12 +264,12 @@ }, "filelock": { "hashes": [ - "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4", - "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd" + "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e", + "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.12.4" + "version": "==3.13.1" }, "flask": { "hashes": [ @@ -318,11 +329,11 @@ }, "jinja2": { "hashes": [ - "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", - "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" + "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", + "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" ], "markers": "python_version >= '3.7'", - "version": "==3.1.2" + "version": "==3.1.3" }, "kaitaistruct": { "hashes": [ @@ -342,69 +353,69 @@ }, "markupsafe": { "hashes": [ - "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e", - "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e", - "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431", - "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686", - "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c", - "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559", - "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc", - "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb", - "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939", - "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c", - "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0", - "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4", - "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9", - "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575", - "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba", - "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d", - "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd", - "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3", - "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00", - "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155", - 
"sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac", - "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52", - "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f", - "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8", - "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b", - "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007", - "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24", - "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea", - "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198", - "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0", - "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee", - "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be", - "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2", - "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1", - "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707", - "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6", - "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c", - "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58", - "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823", - "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779", - "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636", - "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c", - "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad", - "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee", - "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc", - "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2", - "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48", - "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7", - "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e", - "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b", - "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa", - "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5", - "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e", - "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb", - "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9", - "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57", - "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc", - "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc", - "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2", - "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11" + "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf", + "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff", + "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", + "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3", + "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532", + "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f", + 
"sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", + "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df", + "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4", + "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", + "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", + "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", + "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8", + "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371", + "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2", + "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465", + "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52", + "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6", + "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", + "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad", + "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", + "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0", + "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029", + "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", + "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a", + "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", + "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5", + "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", + "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf", + "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9", + "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", + "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", + "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3", + "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", + "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46", + "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc", + "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a", + "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", + "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900", + "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", + "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea", + "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", + "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5", + "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e", + "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", + "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f", + "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50", + "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", + "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", + "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4", + "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff", + "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2", + 
"sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46", + "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", + "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf", + "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", + "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5", + "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab", + "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd", + "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68" ], "markers": "python_version >= '3.7'", - "version": "==2.1.3" + "version": "==2.1.5" }, "mitmproxy": { "editable": true, @@ -491,11 +502,11 @@ }, "pluggy": { "hashes": [ - "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12", - "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7" + "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", + "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" ], "markers": "python_version >= '3.8'", - "version": "==1.3.0" + "version": "==1.4.0" }, "protobuf": { "hashes": [ @@ -526,12 +537,12 @@ }, "psycopg": { "hashes": [ - "sha256:7542c45810ea16356e5126c9b4291cbc3802aa326fcbba09ff154fe380de29be", - "sha256:cd711edb64b07d7f8a233c365806caf7e55bbe7cbbd8d5c680f672bb5353c8d5" + "sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b", + "sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.1.11" + "version": "==3.1.18" }, "publicsuffix2": { "hashes": [ @@ -542,11 +553,11 @@ }, "pyasn1": { "hashes": [ - "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57", - "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde" + "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58", + "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==0.5.0" + "version": "==0.5.1" }, "pycparser": { "hashes": [ @@ -557,11 +568,11 @@ }, "pyopenssl": { "hashes": [ - "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2", - "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac" + "sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf", + "sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3" ], - "markers": "python_version >= '3.6'", - "version": "==23.2.0" + "markers": "python_version >= '3.7'", + "version": "==24.0.0" }, "pyparsing": { "hashes": [ @@ -579,48 +590,48 @@ }, "pytest": { "hashes": [ - "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002", - "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069" + "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", + "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==7.4.2" + "markers": "python_version >= '3.8'", + "version": "==8.0.0" }, "pytest-asyncio": { "hashes": [ - "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d", - "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b" + "sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675", + 
"sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==0.21.1" + "markers": "python_version >= '3.8'", + "version": "==0.23.5" }, "pytest-repeat": { "hashes": [ - "sha256:4474a7d9e9137f6d8cc8ae297f8c4168d33c56dd740aa78cfffe562557e6b96e", - "sha256:5cd3289745ab3156d43eb9c8e7f7d00a926f3ae5c9cf425bec649b2fe15bad5b" - ], - "index": "pypi", - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.9.1" - }, - "pytest-timeout": { - "hashes": [ - "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9", - "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6" - ], - "index": "pypi", - "markers": "python_version >= '3.6'", - "version": "==2.1.0" - }, - "pytest-xdist": { - "hashes": [ - "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93", - "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2" + "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed", + "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.3.1" + "version": "==0.9.3" + }, + "pytest-timeout": { + "hashes": [ + "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90", + "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==2.2.0" + }, + "pytest-xdist": { + "hashes": [ + "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a", + "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.5.0" }, "pyyaml": { "hashes": [ @@ -653,6 +664,7 @@ "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", + "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", @@ -693,36 +705,37 @@ "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe", "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62", "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1", "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", - "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", 
"sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", - "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", - "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6", "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", - "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c", "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f", "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", - "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", @@ -731,7 +744,6 @@ "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", - "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", @@ -760,28 +772,28 @@ }, "tornado": { "hashes": [ - "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f", - "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5", - "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d", - "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3", - "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2", - "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a", - "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16", - "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a", - "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17", - "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0", - 
"sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe" + "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0", + "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63", + "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263", + "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052", + "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f", + "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee", + "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78", + "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579", + "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212", + "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e", + "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2" ], "markers": "python_version >= '3.8'", - "version": "==6.3.3" + "version": "==6.4" }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" }, "urwid": { "hashes": [ @@ -791,12 +803,12 @@ }, "werkzeug": { "hashes": [ - "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", - "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" + "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", + "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.0.1" + "version": "==2.3.7" }, "wsproto": { "hashes": [ @@ -864,40 +876,40 @@ "develop": { "attrs": { "hashes": [ - "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04", - "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015" + "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30", + "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1" ], "markers": "python_version >= '3.7'", - "version": "==23.1.0" + "version": "==23.2.0" }, "black": { "hashes": [ - "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f", - "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7", - "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100", - "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573", - "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d", - "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f", - "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9", - "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300", - "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948", - "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325", - "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9", - "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71", - "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186", - "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f", - 
"sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe", - "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855", - "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80", - "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393", - "sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c", - "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204", - "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377", - "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301" + "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8", + "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8", + "sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd", + "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9", + "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31", + "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92", + "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f", + "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29", + "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4", + "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693", + "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218", + "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a", + "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23", + "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0", + "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982", + "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894", + "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540", + "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430", + "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b", + "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2", + "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6", + "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==23.9.1" + "version": "==24.2.0" }, "click": { "hashes": [ @@ -909,30 +921,30 @@ }, "flake8": { "hashes": [ - "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23", - "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5" + "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132", + "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==6.1.0" + "version": "==7.0.0" }, "flake8-bugbear": { "hashes": [ - "sha256:90cf04b19ca02a682feb5aac67cae8de742af70538590509941ab10ae8351f71", - "sha256:b182cf96ea8f7a8595b2f87321d7d9b28728f4d9c3318012d896543d19742cb5" + "sha256:663ef5de80cd32aacd39d362212983bc4636435a6f83700b4ed35acbd0b7d1b8", + "sha256:f9cb5f2a9e792dd80ff68e89a14c12eed8620af8b41a49d823b7a33064ac9658" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==23.9.16" + "version": "==24.2.6" }, "isort": { "hashes": [ - "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504", - "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6" + 
"sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", + "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6" ], "index": "pypi", "markers": "python_full_version >= '3.8.0'", - "version": "==5.12.0" + "version": "==5.13.2" }, "mccabe": { "hashes": [ @@ -960,19 +972,19 @@ }, "pathspec": { "hashes": [ - "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20", - "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3" + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" ], - "markers": "python_version >= '3.7'", - "version": "==0.11.2" + "markers": "python_version >= '3.8'", + "version": "==0.12.1" }, "platformdirs": { "hashes": [ - "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", - "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" + "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068", + "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768" ], - "markers": "python_version >= '3.7'", - "version": "==3.11.0" + "markers": "python_version >= '3.8'", + "version": "==4.2.0" }, "pycodestyle": { "hashes": [ @@ -984,11 +996,11 @@ }, "pyflakes": { "hashes": [ - "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774", - "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc" + "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", + "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a" ], "markers": "python_version >= '3.8'", - "version": "==3.1.0" + "version": "==3.2.0" }, "tomli": { "hashes": [ @@ -1000,11 +1012,11 @@ }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" } } } diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 16ff091e7..d149ff650 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -24,27 +24,28 @@ jobs: runs-on: ubuntu-latest name: Initialize parameters outputs: - build_image_name: "citus/extbuilder" - test_image_name: "citus/exttester" - citusupgrade_image_name: "citus/citusupgradetester" - fail_test_image_name: "citus/failtester" - pgupgrade_image_name: "citus/pgupgradetester" - style_checker_image_name: "citus/stylechecker" + build_image_name: "ghcr.io/citusdata/extbuilder" + test_image_name: "ghcr.io/citusdata/exttester" + citusupgrade_image_name: "ghcr.io/citusdata/citusupgradetester" + fail_test_image_name: "ghcr.io/citusdata/failtester" + pgupgrade_image_name: "ghcr.io/citusdata/pgupgradetester" + style_checker_image_name: "ghcr.io/citusdata/stylechecker" style_checker_tools_version: "0.8.18" - image_suffix: "-v19b671f" - pg14_version: '{ "major": "14", "full": "14.10" }' - pg15_version: '{ "major": "15", "full": "15.5" }' - pg16_version: '{ "major": "16", "full": "16.1" }' - upgrade_pg_versions: "14.10-15.5-16.1" + sql_snapshot_pg_version: "16.6" + image_suffix: "-v5779674" + pg14_version: '{ "major": "14", "full": "14.15" }' + pg15_version: '{ "major": "15", "full": "15.10" }' + 
pg16_version: '{ "major": "16", "full": "16.6" }' + upgrade_pg_versions: "14.15-15.10-16.6" steps: - # Since GHA jobs needs at least one step we use a noop step here. + # Since GHA jobs need at least one step we use a noop step here. - name: Set up parameters run: echo 'noop' check-sql-snapshots: needs: params runs-on: ubuntu-20.04 container: - image: ${{ needs.params.outputs.build_image_name }}:latest + image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }} options: --user root steps: - uses: actions/checkout@v3.5.0 @@ -61,7 +62,7 @@ jobs: - name: Check Snapshots run: | git config --global --add safe.directory ${GITHUB_WORKSPACE} - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Check C Style @@ -117,7 +118,7 @@ jobs: image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}" options: --user root steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - name: Expose $PG_MAJOR to Github Env run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV shell: bash @@ -227,7 +228,7 @@ jobs: - params - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" - name: Run Test run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }} @@ -261,7 +262,7 @@ jobs: - ${{ needs.params.outputs.pg16_version }} parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" - name: Test arbitrary configs run: |- @@ -311,7 +312,7 @@ jobs: old_pg_major: ${{ matrix.old_pg_major }} new_pg_major: ${{ matrix.new_pg_major }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" with: pg_major: "${{ env.old_pg_major }}" @@ -349,7 +350,7 @@ jobs: - params - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" with: skip_installation: true @@ -413,7 +414,7 @@ jobs: needs: - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: azure/login@v1 with: creds: ${{ secrets.AZURE_CREDENTIALS }} @@ -431,7 +432,7 @@ jobs: needs: - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: azure/login@v1 with: creds: ${{ secrets.AZURE_CREDENTIALS }} @@ -450,7 +451,7 @@ jobs: outputs: json: ${{ steps.parallelization.outputs.json }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/parallelization" id: parallelization with: @@ -463,7 +464,7 @@ jobs: outputs: tests: ${{ steps.detect-regression-tests.outputs.tests }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Detect regression tests need to be ran @@ -514,7 +515,7 @@ jobs: fail-fast: false matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" - name: Run minimal tests run: |- diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 6478abf4b..027f5a048 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Initialize CodeQL uses: 
github/codeql-action/init@v2 diff --git a/.github/workflows/flaky_test_debugging.yml b/.github/workflows/flaky_test_debugging.yml index a744edc3b..7135f99fa 100644 --- a/.github/workflows/flaky_test_debugging.yml +++ b/.github/workflows/flaky_test_debugging.yml @@ -28,7 +28,7 @@ jobs: image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }} options: --user root steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - name: Configure, Build, and Install run: | echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV @@ -46,7 +46,7 @@ jobs: outputs: json: ${{ steps.parallelization.outputs.json }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/parallelization" id: parallelization with: @@ -67,7 +67,7 @@ jobs: fail-fast: false matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" - name: Run minimal tests run: |- diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml index 4ae741a91..7f89b9f83 100644 --- a/.github/workflows/packaging-test-pipelines.yml +++ b/.github/workflows/packaging-test-pipelines.yml @@ -19,7 +19,7 @@ jobs: pg_versions: ${{ steps.get-postgres-versions.outputs.pg_versions }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 2 - name: Get Postgres Versions @@ -51,18 +51,6 @@ jobs: - almalinux-8 - almalinux-9 POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }} - # Postgres removed support for CentOS 7 in PG 16. Below block is needed to - # keep the build for CentOS 7 working for PG 14 and PG 15. - # Once dependent systems drop support for Centos 7, we can remove this block. - include: - - packaging_docker_image: centos-7 - POSTGRES_VERSION: 14 - - packaging_docker_image: centos-7 - POSTGRES_VERSION: 15 - - packaging_docker_image: oraclelinux-7 - POSTGRES_VERSION: 14 - - packaging_docker_image: oraclelinux-7 - POSTGRES_VERSION: 15 container: image: citus/packaging:${{ matrix.packaging_docker_image }}-pg${{ matrix.POSTGRES_VERSION }} @@ -70,7 +58,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set Postgres and python parameters for rpm based distros run: | @@ -128,7 +116,6 @@ jobs: # for each deb based image and we use POSTGRES_VERSION to set # PG_CONFIG variable in each of those runs. packaging_docker_image: - - debian-buster-all - debian-bookworm-all - debian-bullseye-all - ubuntu-focal-all diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d979c104..0ebb6bec8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,213 @@ +### citus v13.0.0 (January 17, 2025) ### + +* Adds support for PostgreSQL 17 (#7699, #7661) + +* Adds `JSON_TABLE()` support in distributed queries (#7816) + +* Propagates `MERGE ... 
WHEN NOT MATCHED BY SOURCE` (#7807) + +* Propagates `MEMORY` and `SERIALIZE` options of `EXPLAIN` (#7802) + +* Adds support for identity columns in distributed partitioned tables (#7785) + +* Allows specifying an access method for distributed partitioned tables (#7818) + +* Allows exclusion constraints on distributed partitioned tables (#7733) + +* Allows configuring sslnegotiation using `citus.node_conninfo` (#7821) + +* Avoids wal receiver timeouts during large shard splits (#7229) + +* Fixes a bug causing incorrect writing of data to target `MERGE` repartition + command (#7659) + +* Fixes a crash that happens because of unsafe catalog access when re-assigning + the global pid after `application_name` changes (#7791) + +* Fixes incorrect `VALID UNTIL` setting assumption made for roles when syncing + them to new nodes (#7534) + +* Fixes segfault when calling distributed procedure with a parameterized + distribution argument (#7242) + +* Fixes server crash when trying to execute `activate_node_snapshot()` on a + single-node cluster (#7552) + +* Improves `citus_move_shard_placement()` to fail early if there is a new node + without reference tables yet (#7467) + +### citus v12.1.6 (Nov 14, 2024) ### + +* Propagates `SECURITY LABEL .. ON ROLE` statements (#7304) + +* Fixes crash caused by running queries with window partition (#7718) + +### citus v12.1.5 (July 17, 2024) ### + +* Adds support for MERGE commands with single shard distributed target tables + (#7643) + +* Fixes an error with MERGE commands when insert value does not have source + distribution column (#7627) + +### citus v12.1.4 (May 28, 2024) ### + +* Adds null check for node in HasRangeTableRef (#7604) + +### citus v12.1.3 (April 18, 2024) ### + +* Allows overwriting host name for all inter-node connections by + supporting "host" parameter in citus.node_conninfo (#7541) + +* Changes the order in which the locks are acquired for the target and + reference tables, when a modify request is initiated from a worker + node that is not the "FirstWorkerNode" (#7542) + +* Fixes a performance issue when distributing a table that depends on an + extension (#7574) + +* Fixes a performance issue when using "\d tablename" on a server with + many tables (#7577) + +* Fixes a crash caused by some form of ALTER TABLE ADD COLUMN + statements. When adding multiple columns, if one of the ADD COLUMN + statements contains a FOREIGN constraint omitting the referenced + columns in the statement, a SEGFAULT was occurring. (#7522) + +* Fixes a performance issue when creating distributed tables if many + already exist (#7575, #7579) + +* Fixes a bug when hostname in pg_dist_node resolves to multiple IPs + (#7377) + +* Fixes performance issue when tracking foreign key constraints on + systems with many constraints (#7578) + +* Fixes segmentation fault when using CASE WHEN in DO block within + functions. 
(#7554) + +* Fixes undefined behavior in master_disable_node due to argument + mismatch (#7492) + +* Fixes some potential bugs by correctly marking some variables as + volatile (#7570) + +* Logs username in the failed connection message (#7432) + +### citus v11.0.10 (February 15, 2024) ### + +* Removes pg_send_cancellation and all references (#7135) + +### citus v12.1.2 (February 12, 2024) ### + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +### citus v12.0.1 (July 11, 2023) ### + +* Fixes incorrect default value assumption for VACUUM(PROCESS_TOAST) (#7122) + +* Fixes a bug that causes an unexpected error when adding a column + with a NULL constraint (#7093) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes shard size bug with too many shards (#7018) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves citus_tables view performance (#7050) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation and all references (#7135) + +### citus v11.3.1 (February 12, 2024) ### + +* Disallows MERGE when the query prunes down to zero shards (#6946) + +* Fixes a bug related to non-existent objects in DDL commands (#6984) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes incorrect results on fetching scrollable with hold cursors (#7014) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes replicate reference tables task fail when user is superuser (#6930) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves citus_shard_sizes performance (#7050) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation and all references (#7135) + +### citus v11.2.2 (February 12, 2024) ### + +* Fixes a bug in background shard rebalancer where the replicate + reference tables task fails if the current user is not a superuser (#6930) + +* Fixes a bug related to non-existent objects in DDL commands (#6984) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes incorrect results on fetching scrollable with hold cursors (#7014) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves failure handling of distributed execution (#7090) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation (#7135) + +### citus v11.1.7 (February 12, 2024) ### + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes a bug related to non-existent objects in DDL commands (#6984) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes incorrect results on fetching scrollable with hold cursors (#7014) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves failure handling of distributed execution (#7090) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation and all 
references (#7135) + +### citus v11.0.9 (February 12, 2024) ### + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes the incorrect column count after ALTER TABLE (#7462) + +* Improves failure handling of distributed execution (#7090) + ### citus v12.1.1 (November 9, 2023) ### * Fixes leaking of memory and memory contexts in Citus foreign key cache diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0b5665544..5a9bb8f5e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -35,6 +35,28 @@ To get citus installed from source we run `make install -s` in the first termina With the Citus cluster running you can connect to the coordinator in the first terminal via `psql -p9700`. Because the coordinator is the most common entrypoint the `PGPORT` environment is set accordingly, so a simple `psql` will connect directly to the coordinator. +### Debugging in VS Code + +1. Start Debugging: Press F5 in VS Code to start debugging. When prompted, you'll need to attach the debugger to the appropriate PostgreSQL process. + +2. Identify the Process: If you're running a psql command, take note of the PID that appears in your psql prompt. For example: +``` +[local] citus@citus:9700 (PID: 5436)=# +``` +This PID (5436 in this case) indicates the process that you should attach the debugger to. +If you are uncertain about which process to attach to, you can list all running PostgreSQL processes using the following command: +``` +ps aux | grep postgres +``` + +Look for the process associated with the PID you noted. For example: +``` +citus 5436 0.0 0.0 0 0 ? S 14:00 0:00 postgres: citus citus +``` +3. Attach the Debugger: Once you've identified the correct PID, select that process when prompted in VS Code to attach the debugger. You should now be able to debug the PostgreSQL session tied to the psql command. + +4. Set Breakpoints and Debug: With the debugger attached, you can set breakpoints within the code. This allows you to step through the code execution, inspect variables, and fully debug the PostgreSQL instance running in your container. + ### Getting and building [PostgreSQL documentation](https://www.postgresql.org/support/versioning/) has a diff --git a/DEVCONTAINER.md b/DEVCONTAINER.md new file mode 100644 index 000000000..d004e6f71 --- /dev/null +++ b/DEVCONTAINER.md @@ -0,0 +1,43 @@ +# Devcontainer + +## Coredumps +When postgres/citus crashes, there is the option to create a coredump. This is useful for debugging the issue. Coredumps are enabled in the devcontainer by default. However, not all environments are configured correctly out of the box. The most important configuration that is not standardized is the `core_pattern`. The configuration can be verified from the container; however, you cannot change this setting from inside the container, as the filesystem containing this setting is in read-only mode while inside the container. + +To verify whether corefiles are written, run the following command in a terminal. This shows the filename pattern with which the corefile will be written. +```bash +cat /proc/sys/kernel/core_pattern +``` + +This should be configured with a relative path or simply a filename, such as `core`. When your environment shows an absolute path, you will need to change this setting. 
How to change this setting depends highly on the underlying system, as the setting needs to be changed on the kernel of the host running the container. + +You can put any pattern in `/proc/sys/kernel/core_pattern` as you see fit. For example, you can add the PID to the core pattern in one of two ways: +- You either include `%p` in the core_pattern. This gets substituted with the PID of the crashing process. +- Alternatively, you can set `/proc/sys/kernel/core_uses_pid` to `1` in the same way as you set `core_pattern`. This will append the PID to the corefile if `%p` is not explicitly contained in the core_pattern. + +When a coredump is written, you can use the debug/launch configuration `Open core file`, which is preconfigured in the devcontainer. This will open a file prompt that lists all coredumps found in your workspace. When you want to debug coredumps from `citus_dev` clusters that run in your `/data` directory, you can add the data directory to your workspace. In the command palette of VS Code you can run `>Workspace: Add Folder to Workspace...` and select the `/data` directory. This will allow you to open the coredumps from the `/data` directory in the `Open core file` debug configuration. + +### Windows (Docker Desktop) +When running in Docker Desktop on Windows you will most likely need to change this setting. The Linux guest in WSL2 that runs your container is the `docker-desktop` environment. The easiest way to get onto the host, where you can change this setting, is to open a PowerShell window and verify you have the docker-desktop environment listed. + +```powershell +wsl --list +``` + +Among others, this should list both `docker-desktop` and `docker-desktop-data`. You can then open a shell in the `docker-desktop` environment. + +```powershell +wsl -d docker-desktop +``` + +Inside this shell you can verify that you have the right environment by running + +```bash +cat /proc/sys/kernel/core_pattern +``` + +This should show the same configuration as the one you see inside the devcontainer. You can then change the setting by running the following command. +This will change the setting for the current session. If you want to make the change permanent, you will need to add this to a startup script.
+ +```bash +echo "core" > /proc/sys/kernel/core_pattern +``` diff --git a/ci/check_gucs_are_alphabetically_sorted.sh b/ci/check_gucs_are_alphabetically_sorted.sh index 214a5c9cf..018fc7d35 100755 --- a/ci/check_gucs_are_alphabetically_sorted.sh +++ b/ci/check_gucs_are_alphabetically_sorted.sh @@ -20,6 +20,6 @@ tail -n +$RegisterCitusConfigVariables_begin_linenumber src/backend/distributed/ # extract citus gucs in the form of "citus.X" grep -P "^[\t][\t]\"citus\.[a-zA-Z_0-9]+\"" RegisterCitusConfigVariables_func_def.out > gucs.out -sort -c gucs.out +LC_COLLATE=C sort -c gucs.out rm gucs.out rm RegisterCitusConfigVariables_func_def.out diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 40486d08f..fd3d171c6 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -2946,7 +2946,7 @@ MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion) } else { - rightComparisionLimit = strlen(leftVersion); + rightComparisionLimit = strlen(rightVersion); } /* we can error out early if hypens are not in the same position */ @@ -3021,6 +3021,8 @@ AvailableExtensionVersionColumnar(void) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("citus extension is not found"))); + + return NULL; /* keep compiler happy */ } diff --git a/src/backend/distributed/README.md b/src/backend/distributed/README.md index 6e3d8cf1c..7da5dcfab 100644 --- a/src/backend/distributed/README.md +++ b/src/backend/distributed/README.md @@ -48,7 +48,7 @@ The purpose of this document is to provide comprehensive technical documentation - [Rebalancing algorithm](#rebalancing-algorithm) - [Shard moves](#shard-moves) - [Shard splits](#shard-splits) - - [Background tasks](#background-tasks) + - [Background task runner](#background-task-runner) - [Resource cleanup](#resource-cleanup) - [Logical decoding / CDC](#logical-decoding--cdc) - [CDC ordering](#cdc-ordering) @@ -2344,17 +2344,164 @@ In the past we had some bugs where we had a `palloc` failure while holding `Spin # Rebalancing -A high-level overview of the rebalancer is given in [this rebalancer blog post](https://www.citusdata.com/blog/2021/03/13/scaling-out-postgres-with-citus-open-source-shard-rebalancer/). +A high-level overview of the shard rebalancer is given in [this rebalancer blog post][rebalancer-post]. It is a bit outdated though, specifically that it uses `rebalance_table_shards()` instead of the newer `citus_rebalance_start()`. + +The shard rebalancer consists of 4 main parts: + +1. The rebalancing algorithm: Decides what moves/splits it should do to make + the cluster balanced. +2. The background task runner: Runs a full rebalance according to a plan + created by the planner. +3. A shard group moves/split: These are the smallest units of work that the + rebalancer does, if this fails midway through the move is aborted and the + shard group remains unchanged. +4. Deferred cleanup: The source shards stay present for a while after a move to + let long-running read queries continue, eventually they need to be cleaned + up. + +These parts interact, but they are pretty self-contained. Usually it's only +necessary to change one of them to add a feature/fix a bug. + +[rebalancer-post]: https://www.citusdata.com/blog/2021/03/13/scaling-out-postgres-with-citus-open-source-shard-rebalancer/ ## Rebalancing algorithm +The rebalancing algorithm tries to find an optimal placement of shard groups +across nodes. 
This is not an easy job, because this is a [co-NP-complete +problem](https://en.wikipedia.org/wiki/Knapsack_problem). So instead of going for +the fully optimal solution it uses a greedy approach to reach a local +optimum, which so far has proved effective in getting to a pretty optimal +solution. + +Even though it won't result in the perfect balance, the greedy approach has two +important practical benefits over a perfect solution: +1. It's relatively easy to understand why the algorithm decided on a certain move. +2. Every move makes the balance better. So if the rebalance is cancelled midway + through, the cluster will always be in a better situation than before. + +As described in the [rebalancer blog post][rebalancer-post], the algorithm +takes three inputs from the function in the `pg_dist_rebalance_strategy` table: + +1. Is a shard group allowed on a certain node? +2. What is the "cost" of a shard group, relative to the other shard groups? +3. What is the "capacity" of a node, relative to the other nodes? + +Cost and capacity are vague on purpose; this way users can choose their own +way to determine the cost of a shard group, but **in practice "cost" is usually +disk size** (because `by_disk_size` is the default rebalance strategy). +Capacity is almost always set to 1, because almost all Citus clusters are +homogeneous (they contain the same nodes, except for maybe the coordinator). The +main usage for "Is a shard group allowed on a certain node?" is to be able to pin a +specific shard group to a specific node. + +There is one last definition that you should know to understand the algorithm +and that is "utilization". Utilization is the total cost of all shard groups +divided by capacity. In practice this means that utilization is almost always +the same as cost because, as explained above, capacity is almost always 1. So if +you see "utilization" in the algorithm, for all intents and purposes you can +read it as "cost". + +The way the general algorithm works is fairly straightforward. It starts by +creating an in-memory representation of the cluster, and then it tries to +improve that in-memory representation by making theoretical moves. So, to be +clear, the algorithm doesn't actually do any shard group moves; it only does +those moves to its in-memory representation. The way it determines what +theoretical moves to make is as follows (updating utilization of in-memory +nodes after every move): + +1. Find all shard groups that are on a node where they are not allowed (due to + "Is a shard group allowed on a certain node?") +2. Order those shard groups by cost +3. Move them one-by-one to nodes with the lowest utilization where they are + allowed. +4. If the cluster is balanced, we are done. +5. Take the most utilized node (A) and take the least utilized node (B). +6. Try moving the shard group with the highest cost from A to B. +7. If the balance is "better", commit this move and continue from step 4. (See the subsection below for + what counts as "better".) +8. If the balance is worse/equal, try again from step 6 with the shard group + with the next highest cost on node A. If this was the lowest cost shard on + node A, then try with the highest cost shard again but on the next least + utilized node after node B. If no moves helped with the balance, try with + the next most utilized node after node A. If we tried all moves for all + nodes like this, we are done (we cannot get a better balance). + + +Of course, the devil is in the details though. + +### When is the balance better?
+ +The main way to determine if the balance is better is by comparing the +utilization of node A and B, before and after the move, and seeing if they are +net closer to the average utilization of the nodes in the cluster. The easiest +way to explain this is with a simple example: + +We have two nodes A and B. A has a utilization of 100GB and B has a utilization +of 70GB. So we will move a shard from A to B. A move of 15GB is obviously best: +it results in perfect balance (A=85GB, B=85GB). A move of 10GB is still +great, both improved in balance (A=90GB, B=80GB). A move of 20GB is also good; +the result is the same as a move of 10GB only with the nodes swapped (A=80GB, +B=90GB). + +The 10GB vs 20GB move shows a limitation of the current algorithm. The +algorithm mostly makes choices based on the end state, not on the cost of +moving a shard. This is usually not a huge problem in practice though. + +### Thresholds + +The algorithm is full of thresholds; the main reason these exist is that +moving shards around isn't free. + +- `threshold`: Used to determine if the cluster is in a good enough state. For + the `by_disk_size` rebalance strategy this is 10%, so if all nodes are at + most 10% above or 10% below the average utilization then no moves are + necessary anymore (i.e. the nodes are balanced enough). The main reason for + this threshold is that these small differences in utilization are not + necessarily problematic and might very well resolve automatically over time. For example, consider a scenario in which + one shard gets mostly written to during the weekend, while another one during + the week. Moving shards on Monday that you then have to move back on + Friday is not very helpful given the overhead of moving data around. +- `improvement_threshold`: This is used in cases where a shard group move from + node A to B swaps which node now has the highest utilization (so afterwards B + will have higher utilization than A). As described above this can still + result in better balance. This threshold is meant to work around a + particularly bad situation where we move a lot of data for very little + benefit. Imagine this situation: A=200GB and B=99GB, thus moving a 100GB shard + from A to B would bring their utilization closer to the average (A=100GB, + B=199GB). But obviously that's a tiny gain for a move of 100GB, which + probably takes lots of resources and time. The `improvement_threshold` is set + to 50% for the `by_disk_size` rebalance strategy. This means that this move + is only chosen if the utilization improvement is larger than 50% of the + utilization that the shard group causes on its current node. (A concrete + sketch of this check is shown a bit further below.) + +### How do multiple colocation groups impact the rebalancer algorithm? + +The previous section glossed over colocation groups a bit. The main reason for +that is that the algorithm doesn't handle multiple colocation groups very well. +If there are multiple colocation groups, each colocation group gets balanced +completely separately. For the utilization calculations only the costs are used +for the shard groups in the colocation group that is currently being rebalanced. +The reasoning for this is that if you have two colocation groups, you probably +want to spread the shard groups from both colocation groups across multiple +nodes, and not have shard groups from colocation group 1 only be on node A and +shard groups from colocation group 2 only be on node B.
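To make the balance check and the `improvement_threshold` above concrete, here is a minimal, self-contained sketch in C. It is an illustration only, not the actual rebalancer code: the function names, the "distance from average" measure, and the exact formulas are assumptions made for this example, and the two calls in `main()` replay the 100GB/70GB and 200GB/99GB scenarios from the text.

```c
/*
 * Illustrative sketch of the "is this move an improvement?" check described
 * above. Not the actual Citus implementation (which lives in the rebalancer
 * source); names and formulas here are assumptions for demonstration.
 */
#include <math.h>
#include <stdbool.h>
#include <stdio.h>

/* combined distance of two nodes from the cluster-wide average utilization */
static double
DistanceFromAverage(double utilA, double utilB, double average)
{
	return fabs(utilA - average) + fabs(utilB - average);
}

/*
 * Returns true if moving a shard group with the given cost from node A to
 * node B makes the balance better, taking improvement_threshold into account.
 */
static bool
MoveIsImprovement(double utilA, double utilB, double shardCost,
				  double average, double improvementThreshold)
{
	double before = DistanceFromAverage(utilA, utilB, average);
	double after = DistanceFromAverage(utilA - shardCost, utilB + shardCost, average);
	double improvement = before - after;

	if (improvement <= 0)
	{
		/* the balance got worse or stayed the same */
		return false;
	}

	/*
	 * If the move swaps which of the two nodes is the most utilized one,
	 * require the improvement to exceed improvement_threshold times the
	 * utilization the shard group causes on its current node (50% for the
	 * by_disk_size strategy).
	 */
	bool swapsHighestNode = (utilB + shardCost) > (utilA - shardCost);
	if (swapsHighestNode && improvement < improvementThreshold * shardCost)
	{
		return false;
	}

	return true;
}

int
main(void)
{
	/* A=100GB, B=70GB: a 10GB move improves the balance -> prints 1 */
	printf("%d\n", MoveIsImprovement(100, 70, 10, 85, 0.5));

	/* A=200GB, B=99GB: a 100GB move is a tiny gain for a lot of data -> prints 0 */
	printf("%d\n", MoveIsImprovement(200, 99, 100, 149.5, 0.5));

	return 0;
}
```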
+ +There is an important caveat here though for colocation groups that have fewer +shard groups than the number of nodes in the cluster (in practice these are +usually colocation groups used by schema based sharding, i.e. with a single +shard group): The rebalancer algorithm balances the shard groups from these +colocation groups as if they are all all part of a single colocation group. +The main reason for this is to make sure that schemas for schema based sharding +are spread evenly across the nodes. + + ## Shard moves -Shard moves move a shard group placement to a different node (group). Moves are orchestrated by the `citus_move_shard_placement` UDF, which is also the function that the rebalancer runs to move a shard. +Shard moves move a shard group placement to a different node (group). It would be more correct if these were called "shard **group** moves", but in many places we don't due to historical reasons. Moves are orchestrated by the `citus_move_shard_placement` UDF, which is also the function that the rebalancer runs to move a shard. We implement blocking and non-blocking shard splits. Non-blocking shard moves use logical replication, which has an important limitation. If the (distributed) table does not have a replica identity (usually the primary key), then update/delete commands will error out once we create a publication. That means using a non-blocking move without a replica identity does incur some downtime. Since a blocking move is generally faster (in part because it forces out regular work), it may be less invasive. We therefore force the user to choose when trying to move a shard group that includes a table without a replica identity by supplying `shard_transfer_mode := 'force_logical'` or `shard_transfer_mode := 'block_writes'`. -The blocking-move is mostly a simplified variant of the non-blocking move (with locks taken upfront). A non-blocking move involves the following steps: +The blocking-move is mostly a simplified variant of the non-blocking move, where the write locks are taken upfront so that no catch-up using logical replication is needed. A non-blocking move involves the following steps: - **Create the new shard group placement on the target node**. We also create constraints that do not involve an index and set up ownership and access control. - **Create publication(s) on the source node**. We create publications containing the shards in the source shard group placement. We create one publications per table owner, mainly because we need one subscription per table owner to prevent privilege escalation issues on older versions of PostgreSQL (15 and below). @@ -2379,7 +2526,7 @@ A workaround for the replica identity problem is to always assign REPLICA IDENTI ## Shard splits -Shard splits convert one shard group ("split parent") into two or more shard groups ("split children") by splitting the hash range. The new shard groups can be placed on the node itself, or on other nodes. We implement blocking and non-blocking shard splits. The blocking variant is mostly a simplified version of non-blocking, so we only cover non-blocking here. Shard splits have many similarities to shard moves, and have the same `shard_transfer_mode` choice. +Shard splits convert one shard group ("split parent") into two or more shard groups ("split children") by splitting the hash range. Just like with shard moves it would be more correct to call these "shard **group** splits", but again we often don't. The new shard groups can be placed on the node itself, or on other nodes. 
We implement blocking and non-blocking shard splits. The blocking variant is mostly a simplified version of non-blocking, so we only cover non-blocking here. Shard splits have many similarities to shard moves, and have the same `shard_transfer_mode` choice. The shard split is a lengthy process performed by the `NonBlockingShardSplit` function, supported by a custom output plugin to handle writes that happen during the split. There are a few different entry-points in this logic, namely: `citus_split_shard_by_split_points`, `create_distributed_table_concurrently`, and `isolate_tenant_to_node`. @@ -2409,19 +2556,54 @@ A difference between splits and moves is that the old shard ID disappears. In ca -## Background tasks +## Background task runner +In the past the only way to trigger a rebalance was to call +`rebalance_table_shards()`; this function runs the rebalance in the current +session. This has the huge downside that the connection needs to be kept open +until the rebalance completes. So eventually we [introduced +`citus_rebalance_start()`](https://www.citusdata.com/blog/2022/09/19/citus-11-1-shards-postgres-tables-without-interruption/#rebalance-background), +which uses a background worker to do the rebalancing, so users can disconnect +their client and the rebalance continues. It even automatically retries moves +if they fail for some reason. + +The way this works is by using a general background job infrastructure that Citus +has in the tables `pg_dist_background_job` and `pg_dist_background_task`. +A job (often) contains multiple tasks. In case of the rebalancer, the job is +the full rebalance, and each of its tasks is a separate shard group move. + +### Parallel background task execution + +A big benefit of the background task infrastructure is that it can execute tasks +and jobs in parallel. This can make rebalancing go much faster, especially in +clusters with many nodes. To ensure that we're not doing too many tasks in +parallel, though, we have a few ways to limit concurrency: + +1. Tasks can depend on each other. This makes sure that one task doesn't start + before all the ones that it depends on have finished. +2. The maximum number of parallel tasks being executed at the same time can be + limited using `citus.max_background_task_executors`. The default for + this is 4. +3. Tasks can specify which nodes are involved in the task; that way we can + ensure that a single node is not involved in too many tasks. The + rebalancer specifies both the source and target node as being involved in + the task. That, together with the default of 1 for + `citus.max_background_task_executors_per_node`, makes sure that a node + doesn't handle more than a single shard move at once, while still allowing + moves involving different nodes to happen in parallel. For larger machines + it can be beneficial to increase the default a bit. + ## Resource cleanup During a shard move/split, some PostgreSQL objects can be created that live outside of the scope of any transaction or are committed early. We need to make sure those objects are dropped once the shard move ends, either through failure or success. For instance, subscriptions and publications used for logical replication need to be dropped in case of failure, but also the target shard (in case of failure) and source shard (in case of success). To achieve that, we write records to pg_dist_cleanup before creating an object to remember that we need to clean it.
We distinguish between a few scenarios: +**Cleanup-always**: For most resources that require cleanup records, cleanup should happen regardless of whether the operation succeeds or fails. For instance, subscriptions and publications should always be dropped. We achieve cleanup always by writing pg_dist_cleanup records in a subtransaction, and at the end of the operation we try to clean up object immediately and if it succeeds delete the record. If cleanup fails, we do not fail the whole operation, but instead leave the pg_dist_cleanup record in place for the maintenance daemon. + **Cleanup-on-failure**: Cleanup should only happen if the operation fails. The main example is the target shard of a move/split. We achieve cleanup-on-failure by writing pg_dist_cleanup records in a subtransaction (transaction on a localhost connection that commits immediately) and deleting them in the outer transaction that performs the move/split. That way, they remain in pg_dist_cleanup in case of failure, but disappear in case of success. **Cleanup-deferred-on-success**: Cleanup should only happen after the operation (move/split) succeeds. We use this to clean the source shards of a shard move. We previously dropped shards immediately as part of the transaction, but this frequently led to deadlocks at the end of a shard move. We achieve cleanup-on-success by writing pg_dist_cleanup records as part of the outer transaction that performs the move/split. -**Cleanup-always**: For most resources that require cleanup records, cleanup should happen regardless of whether the operation succeeds or fails. For instance, subscriptions and publications should always be dropped. We achieve cleanup always by writing pg_dist_cleanup records in a subtransaction, and at the end of the operation we try to clean up object immediately and if it succeeds delete the record. If cleanup fails, we do not fail the whole operation, but instead leave the pg_dist_cleanup record in place for the maintenance daemon. - -Resource cleaner (currently shard_cleaner.c) is part of the maintenance daemon and periodically checks pg_dist_cleanup for cleanup tasks. It’s important to prevent cleanup of operations that are already running. Therefore, each operation has a unique operation ID (from a sequence) and takes an advisory lock on the operation ID. The resource cleaner learns the operation ID from pg_dist_cleanup and attempts to acquire this lock. If it cannot acquire the lock, the operation is not done and cleanup is skipped. If it can, the operation is done, and the resource cleaner rechecks whether the record still exists, since it could have been deleted by the operation. +Resource cleaner (currently shard_cleaner.c) is part of the maintenance daemon and periodically checks pg_dist_cleanup for cleanup tasks. It’s important to prevent cleanup of operations that are still running. Therefore, each operation has a unique operation ID (from a sequence) and takes an advisory lock on the operation ID. The resource cleaner learns the operation ID from pg_dist_cleanup and attempts to acquire this lock. If it cannot acquire the lock, the operation is not done and cleanup is skipped. If it can, the operation is done, and the resource cleaner rechecks whether the record still exists, since it could have been deleted by the operation. Cleanup records always need to be committed before creating the actual object. 
It’s also important for the cleanup operation to be idempotent, since the server might crash immediately after committing a cleanup record, but before actually creating the object. Hence, the object might not exist when trying to clean it up. In that case, the cleanup is seen as successful, and the cleanup record removed. @@ -2429,7 +2611,7 @@ Cleanup records always need to be committed before creating the actual object. I PostgreSQL supports change data capture (CDC) via the logical decoding interface. The basic idea behind logical decoding is that you make a replication connection (a special type of postgres connection), start replication, and then the backend process reads through the WAL and decodes the WAL records and emits it over the wire in a format defined by the output plugin. If we were to use regular logical decoding on the nodes of a Citus cluster, we would see the name of the shard in each write, and internal data transfers such as shard moves would result in inserts being emitted. We use several techniques to avoid this. -All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal_start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to output plugin to decide what to do with changes marked as DoNotReplicateId. +All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal.start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to output plugin to decide what to do with changes marked as DoNotReplicateId. We have very minimal control over replication commands like `CREATE_REPLICATION_SLOT`, since there are no direct hooks, and decoder names (e.g. “pgoutput”) are typically hard-coded in the client. The only method we found of overriding logical decoding behaviour is to overload the output plugin name in the dynamic library path. 
diff --git a/src/backend/distributed/clock/causal_clock.c b/src/backend/distributed/clock/causal_clock.c index 3d64757e3..eb4b8d9d3 100644 --- a/src/backend/distributed/clock/causal_clock.c +++ b/src/backend/distributed/clock/causal_clock.c @@ -397,7 +397,7 @@ AdjustClocksToTransactionHighest(List *nodeConnectionList, /* Set the clock value on participating worker nodes */ appendStringInfo(queryToSend, - "SELECT pg_catalog.citus_internal_adjust_local_clock_to_remote" + "SELECT citus_internal.adjust_local_clock_to_remote" "('(%lu, %u)'::pg_catalog.cluster_clock);", transactionClockValue->logical, transactionClockValue->counter); diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 5ec6d6dd7..8c59aa199 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -22,6 +22,7 @@ #include "catalog/dependency.h" #include "catalog/index.h" #include "catalog/pg_am.h" +#include "catalog/pg_attrdef.h" #include "catalog/pg_attribute.h" #include "catalog/pg_enum.h" #include "catalog/pg_extension.h" @@ -50,6 +51,7 @@ #include "tcop/pquery.h" #include "tcop/tcopprot.h" #include "utils/builtins.h" +#include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/memutils.h" @@ -1696,52 +1698,39 @@ PropagatePrerequisiteObjectsForDistributedTable(Oid relationId) void EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId) { - List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); - citusTableIdList = list_append_unique_oid(citusTableIdList, ownerRelationId); + Oid attrDefOid; + List *attrDefOids = GetAttrDefsFromSequence(seqOid); - Oid citusTableId = InvalidOid; - foreach_oid(citusTableId, citusTableIdList) + foreach_oid(attrDefOid, attrDefOids) { - List *seqInfoList = NIL; - GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0, DEPENDENCY_AUTO); + ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid); - SequenceInfo *seqInfo = NULL; - foreach_ptr(seqInfo, seqInfoList) + /* + * If another distributed table is using the same sequence + * in one of its column defaults, make sure the types of the + * columns match. + * + * We skip non-distributed tables, but we need to check the current + * table as it might reference the same sequence multiple times. + */ + if (columnAddress.objectId != ownerRelationId && + !IsCitusTable(columnAddress.objectId)) { - AttrNumber currentAttnum = seqInfo->attributeNumber; - Oid currentSeqOid = seqInfo->sequenceOid; - - if (!seqInfo->isNextValDefault) - { - /* - * If a sequence is not on the nextval, we don't need any check. - * This is a dependent sequence via ALTER SEQUENCE .. 
OWNED BY col - */ - continue; - } - - /* - * If another distributed table is using the same sequence - * in one of its column defaults, make sure the types of the - * columns match - */ - if (currentSeqOid == seqOid) - { - Oid currentAttributeTypId = GetAttributeTypeOid(citusTableId, - currentAttnum); - if (attributeTypeId != currentAttributeTypId) - { - char *sequenceName = generate_qualified_relation_name( - seqOid); - char *citusTableName = - generate_qualified_relation_name(citusTableId); - ereport(ERROR, (errmsg( - "The sequence %s is already used for a different" - " type in column %d of the table %s", - sequenceName, currentAttnum, - citusTableName))); - } - } + continue; + } + Oid currentAttributeTypId = GetAttributeTypeOid(columnAddress.objectId, + columnAddress.objectSubId); + if (attributeTypeId != currentAttributeTypId) + { + char *sequenceName = generate_qualified_relation_name( + seqOid); + char *citusTableName = + generate_qualified_relation_name(columnAddress.objectId); + ereport(ERROR, (errmsg( + "The sequence %s is already used for a different" + " type in column %d of the table %s", + sequenceName, columnAddress.objectSubId, + citusTableName))); } } } diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 0eb87ec19..5479a59ed 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -40,15 +40,38 @@ #include "distributed/deparse_shard_query.h" #include "distributed/deparser.h" #include "distributed/listutils.h" +#include "distributed/local_executor.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" #include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" #include "distributed/serialize_distributed_ddls.h" +#include "distributed/shard_cleaner.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" + +/* + * Used to save original name of the database before it is replaced with a + * temporary name for failure handling purposes in PreprocessCreateDatabaseStmt(). + */ +static char *CreateDatabaseCommandOriginalDbName = NULL; + + +/* + * The format string used when creating a temporary databases for failure + * handling purposes. + * + * The fields are as follows to ensure using a unique name for each temporary + * database: + * - operationId: The operation id returned by RegisterOperationNeedingCleanup(). + * - groupId: The group id of the worker node where CREATE DATABASE command + * is issued from. + */ +#define TEMP_DATABASE_NAME_FMT "citus_temp_database_%lu_%d" + + /* * DatabaseCollationInfo is used to store collation related information of a database. */ @@ -286,8 +309,9 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString, * NontransactionalNodeDDLTask to run the command on the workers outside * the transaction block. */ - - return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands); + bool warnForPartialFailure = true; + return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands, + warnForPartialFailure); } else { @@ -453,7 +477,12 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, * * In this stage, we perform validations that we want to ensure before delegating to * previous utility hooks because it might not be convenient to throw an error in an - * implicit transaction that creates a database. + * implicit transaction that creates a database. 
Also in this stage, we save the original + * database name and replace dbname field with a temporary name for failure handling + * purposes. We let Postgres create the database with the temporary name, insert a cleanup + * record for the temporary database name on all nodes and let PostprocessCreateDatabaseStmt() + * to return the distributed DDL job that both creates the database with the temporary name + * and then renames it back to its original name. * * We also serialize database commands globally by acquiring a Citus specific advisory * lock based on OCLASS_DATABASE on the first primary worker node. @@ -467,24 +496,56 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString, return NIL; } - EnsurePropagationToCoordinator(); + EnsureCoordinatorIsInMetadata(); CreatedbStmt *stmt = castNode(CreatedbStmt, node); EnsureSupportedCreateDatabaseCommand(stmt); SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE); + OperationId operationId = RegisterOperationNeedingCleanup(); + + char *tempDatabaseName = psprintf(TEMP_DATABASE_NAME_FMT, + operationId, GetLocalGroupId()); + + List *remoteNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock); + WorkerNode *remoteNode = NULL; + foreach_ptr(remoteNode, remoteNodes) + { + InsertCleanupRecordOutsideTransaction( + CLEANUP_OBJECT_DATABASE, + pstrdup(quote_identifier(tempDatabaseName)), + remoteNode->groupId, + CLEANUP_ON_FAILURE + ); + } + + CreateDatabaseCommandOriginalDbName = stmt->dbname; + stmt->dbname = tempDatabaseName; + + /* + * Delete cleanup records in the same transaction so that if the current + * transactions fails for some reason, then the cleanup records won't be + * deleted. In the happy path, we will delete the cleanup records without + * deferring them to the background worker. + */ + FinalizeOperationNeedingCleanupOnSuccess("create database"); + return NIL; } /* * PostprocessCreateDatabaseStmt is executed after the statement is applied to the local - * postgres instance. In this stage we prepare the commands that need to be run on - * all workers to create the database. Since the CREATE DATABASE statement gives error - * in a transaction block, we need to use NontransactionalNodeDDLTaskList to send the - * CREATE DATABASE statement to the workers. + * postgres instance. * + * In this stage, we first rename the temporary database back to its original name for + * local node and then return a list of distributed DDL jobs to create the database with + * the temporary name and then to rename it back to its original name. That way, if CREATE + * DATABASE fails on any of the nodes, the temporary database will be cleaned up by the + * cleanup records that we inserted in PreprocessCreateDatabaseStmt() and in case of a + * failure, we won't leak any databases called as the name that user intended to use for + * the database. 
*/ List * PostprocessCreateDatabaseStmt(Node *node, const char *queryString) @@ -508,20 +569,71 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString) char *createDatabaseCommand = DeparseTreeNode(node); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) createDatabaseCommand, - ENABLE_DDL_PROPAGATION); + List *createDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) createDatabaseCommand, + ENABLE_DDL_PROPAGATION); - return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands); + /* + * Since the CREATE DATABASE statements cannot be executed in a transaction + * block, we need to use NontransactionalNodeDDLTaskList() to send the CREATE + * DATABASE statement to the workers. + */ + bool warnForPartialFailure = false; + List *createDatabaseDDLJobList = + NontransactionalNodeDDLTaskList(REMOTE_NODES, createDatabaseCommands, + warnForPartialFailure); + + CreatedbStmt *stmt = castNode(CreatedbStmt, node); + + char *renameDatabaseCommand = + psprintf("ALTER DATABASE %s RENAME TO %s", + quote_identifier(stmt->dbname), + quote_identifier(CreateDatabaseCommandOriginalDbName)); + + List *renameDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION, + renameDatabaseCommand, + ENABLE_DDL_PROPAGATION); + + /* + * We use NodeDDLTaskList() to send the RENAME DATABASE statement to the + * workers because we want to execute it in a coordinated transaction. + */ + List *renameDatabaseDDLJobList = + NodeDDLTaskList(REMOTE_NODES, renameDatabaseCommands); + + /* + * Temporarily disable citus.enable_ddl_propagation before issuing + * rename command locally because we don't want to execute it on remote + * nodes yet. We will execute it on remote nodes by returning it as a + * distributed DDL job. + * + * The reason why we don't want to execute it on remote nodes yet is that + * the database is not created on remote nodes yet. + */ + int saveNestLevel = NewGUCNestLevel(); + set_config_option("citus.enable_ddl_propagation", "off", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + + ExecuteUtilityCommand(renameDatabaseCommand); + + AtEOXact_GUC(true, saveNestLevel); + + /* + * Restore the original database name because MarkObjectDistributed() + * resolves oid of the object based on the database name and is called + * after executing the distributed DDL job that renames temporary database. + */ + stmt->dbname = CreateDatabaseCommandOriginalDbName; + + return list_concat(createDatabaseDDLJobList, renameDatabaseDDLJobList); } /* * PreprocessDropDatabaseStmt is executed before the statement is applied to the local * postgres instance. In this stage we can prepare the commands that need to be run on - * all workers to drop the database. Since the DROP DATABASE statement gives error in - * transaction context, we need to use NontransactionalNodeDDLTaskList to send the - * DROP DATABASE statement to the workers. + * all workers to drop the database. * * We also serialize database commands globally by acquiring a Citus specific advisory * lock based on OCLASS_DATABASE on the first primary worker node. 
@@ -559,11 +671,20 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, char *dropDatabaseCommand = DeparseTreeNode(node); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) dropDatabaseCommand, - ENABLE_DDL_PROPAGATION); + List *dropDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) dropDatabaseCommand, + ENABLE_DDL_PROPAGATION); - return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands); + /* + * Due to same reason stated in PostprocessCreateDatabaseStmt(), we need to + * use NontransactionalNodeDDLTaskList() to send the DROP DATABASE statement + * to the workers. + */ + bool warnForPartialFailure = true; + List *dropDatabaseDDLJobList = + NontransactionalNodeDDLTaskList(REMOTE_NODES, dropDatabaseCommands, + warnForPartialFailure); + return dropDatabaseDDLJobList; } @@ -890,7 +1011,7 @@ CreateDatabaseDDLCommand(Oid dbId) /* Generate the CREATE DATABASE statement */ appendStringInfo(outerDbStmt, - "SELECT pg_catalog.citus_internal_database_command(%s)", + "SELECT citus_internal.database_command(%s)", quote_literal_cstr(createStmt)); ReleaseSysCache(tuple); diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 2ead0c58a..8d4c6431b 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -1093,33 +1093,26 @@ List * GetDependentFDWsToExtension(Oid extensionId) { List *extensionFDWs = NIL; - ScanKeyData key[3]; - int scanKeyCount = 3; + ScanKeyData key[1]; HeapTuple tup; Relation pgDepend = table_open(DependRelationId, AccessShareLock); ScanKeyInit(&key[0], - Anum_pg_depend_refclassid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ExtensionRelationId)); - ScanKeyInit(&key[1], - Anum_pg_depend_refobjid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(extensionId)); - ScanKeyInit(&key[2], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ForeignDataWrapperRelationId)); - SysScanDesc scan = systable_beginscan(pgDepend, InvalidOid, false, - NULL, scanKeyCount, key); + SysScanDesc scan = systable_beginscan(pgDepend, DependDependerIndexId, true, + NULL, lengthof(key), key); while (HeapTupleIsValid(tup = systable_getnext(scan))) { Form_pg_depend pgDependEntry = (Form_pg_depend) GETSTRUCT(tup); - if (pgDependEntry->deptype == DEPENDENCY_EXTENSION) + if (pgDependEntry->deptype == DEPENDENCY_EXTENSION && + pgDependEntry->refclassid == ExtensionRelationId && + pgDependEntry->refobjid == extensionId) { extensionFDWs = lappend_oid(extensionFDWs, pgDependEntry->objid); } diff --git a/src/backend/distributed/commands/foreign_constraint.c b/src/backend/distributed/commands/foreign_constraint.c index c1f2b83b6..2f60c3fb1 100644 --- a/src/backend/distributed/commands/foreign_constraint.c +++ b/src/backend/distributed/commands/foreign_constraint.c @@ -20,6 +20,7 @@ #include "access/xact.h" #include "catalog/namespace.h" #include "catalog/pg_constraint.h" +#include "catalog/pg_depend.h" #include "catalog/pg_type.h" #include "utils/builtins.h" #include "utils/fmgroids.h" @@ -36,6 +37,7 @@ #include "distributed/commands.h" #include "distributed/commands/sequence.h" #include "distributed/coordinator_protocol.h" +#include "distributed/hash_helpers.h" #include "distributed/listutils.h" #include "distributed/multi_join_order.h" #include "distributed/namespace_utils.h" @@ -1198,6 +1200,114 @@ TableHasExternalForeignKeys(Oid relationId) } +/* + * ForeignConstraintMatchesFlags is a function with logic that's very specific + * 
to GetForeignKeyOids. There's no reason to use it in any other context. + */ +static bool +ForeignConstraintMatchesFlags(Form_pg_constraint constraintForm, + int flags) +{ + if (constraintForm->contype != CONSTRAINT_FOREIGN) + { + return false; + } + + bool inheritedConstraint = OidIsValid(constraintForm->conparentid); + if (inheritedConstraint) + { + /* + * We only consider the constraints that are explicitly created on + * the table as we already process the constraints from parent tables + * implicitly when a command is issued + */ + return false; + } + + bool excludeSelfReference = (flags & EXCLUDE_SELF_REFERENCES); + bool isSelfReference = (constraintForm->conrelid == constraintForm->confrelid); + if (excludeSelfReference && isSelfReference) + { + return false; + } + + Oid otherTableId = InvalidOid; + if (flags & INCLUDE_REFERENCING_CONSTRAINTS) + { + otherTableId = constraintForm->confrelid; + } + else + { + otherTableId = constraintForm->conrelid; + } + + return IsTableTypeIncluded(otherTableId, flags); +} + + +/* + * GetForeignKeyOidsForReferencedTable returns a list of foreign key OIDs that + * reference the relationId and match the given flags. + * + * This is separated from GetForeignKeyOids because we need to scan pg_depend + * instead of pg_constraint directly. The reason for this is that there is no + * index on the confrelid of pg_constraint, so searching by that column + * requires a seqscan. + */ +static List * +GetForeignKeyOidsForReferencedTable(Oid relationId, int flags) +{ + HTAB *foreignKeyOidsSet = CreateSimpleHashSetWithName( + Oid, "ReferencingForeignKeyOidsSet"); + List *foreignKeyOidsList = NIL; + ScanKeyData key[2]; + HeapTuple dependTup; + Relation depRel = table_open(DependRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_depend_refclassid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], + Anum_pg_depend_refobjid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(relationId)); + SysScanDesc scan = systable_beginscan(depRel, DependReferenceIndexId, true, + NULL, lengthof(key), key); + while (HeapTupleIsValid(dependTup = systable_getnext(scan))) + { + Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(dependTup); + + if (deprec->classid != ConstraintRelationId || + deprec->deptype != DEPENDENCY_NORMAL || + hash_search(foreignKeyOidsSet, &deprec->objid, HASH_FIND, NULL)) + { + continue; + } + + + HeapTuple constraintTup = SearchSysCache1(CONSTROID, ObjectIdGetDatum( + deprec->objid)); + if (!HeapTupleIsValid(constraintTup)) /* can happen during DROP TABLE */ + { + continue; + } + + Form_pg_constraint constraint = (Form_pg_constraint) GETSTRUCT(constraintTup); + if (constraint->confrelid == relationId && + ForeignConstraintMatchesFlags(constraint, flags)) + { + foreignKeyOidsList = lappend_oid(foreignKeyOidsList, constraint->oid); + hash_search(foreignKeyOidsSet, &constraint->oid, HASH_ENTER, NULL); + } + ReleaseSysCache(constraintTup); + } + systable_endscan(scan); + table_close(depRel, AccessShareLock); + return foreignKeyOidsList; +} + + /* * GetForeignKeyOids takes in a relationId, and returns a list of OIDs for * foreign constraints that the relation with relationId is involved according @@ -1207,9 +1317,8 @@ TableHasExternalForeignKeys(Oid relationId) List * GetForeignKeyOids(Oid relationId, int flags) { - AttrNumber pgConstraintTargetAttrNumber = InvalidAttrNumber; - - bool extractReferencing = (flags & INCLUDE_REFERENCING_CONSTRAINTS); + bool extractReferencing PG_USED_FOR_ASSERTS_ONLY = 
(flags & + INCLUDE_REFERENCING_CONSTRAINTS); bool extractReferenced = (flags & INCLUDE_REFERENCED_CONSTRAINTS); /* @@ -1220,22 +1329,10 @@ GetForeignKeyOids(Oid relationId, int flags) Assert(!(extractReferencing && extractReferenced)); Assert(extractReferencing || extractReferenced); - bool useIndex = false; - Oid indexOid = InvalidOid; - - if (extractReferencing) + if (extractReferenced) { - pgConstraintTargetAttrNumber = Anum_pg_constraint_conrelid; - - useIndex = true; - indexOid = ConstraintRelidTypidNameIndexId; + return GetForeignKeyOidsForReferencedTable(relationId, flags); } - else if (extractReferenced) - { - pgConstraintTargetAttrNumber = Anum_pg_constraint_confrelid; - } - - bool excludeSelfReference = (flags & EXCLUDE_SELF_REFERENCES); List *foreignKeyOids = NIL; @@ -1243,62 +1340,22 @@ GetForeignKeyOids(Oid relationId, int flags) int scanKeyCount = 1; Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock); - ScanKeyInit(&scanKey[0], pgConstraintTargetAttrNumber, + ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId)); - SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, indexOid, useIndex, + + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, + ConstraintRelidTypidNameIndexId, true, NULL, scanKeyCount, scanKey); - HeapTuple heapTuple = systable_getnext(scanDescriptor); - while (HeapTupleIsValid(heapTuple)) + HeapTuple heapTuple; + while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor))) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); - if (constraintForm->contype != CONSTRAINT_FOREIGN) + if (ForeignConstraintMatchesFlags(constraintForm, flags)) { - heapTuple = systable_getnext(scanDescriptor); - continue; + foreignKeyOids = lappend_oid(foreignKeyOids, constraintForm->oid); } - - bool inheritedConstraint = OidIsValid(constraintForm->conparentid); - if (inheritedConstraint) - { - /* - * We only consider the constraints that are explicitly created on - * the table as we already process the constraints from parent tables - * implicitly when a command is issued - */ - heapTuple = systable_getnext(scanDescriptor); - continue; - } - - Oid constraintId = constraintForm->oid; - - bool isSelfReference = (constraintForm->conrelid == constraintForm->confrelid); - if (excludeSelfReference && isSelfReference) - { - heapTuple = systable_getnext(scanDescriptor); - continue; - } - - Oid otherTableId = InvalidOid; - if (extractReferencing) - { - otherTableId = constraintForm->confrelid; - } - else if (extractReferenced) - { - otherTableId = constraintForm->conrelid; - } - - if (!IsTableTypeIncluded(otherTableId, flags)) - { - heapTuple = systable_getnext(scanDescriptor); - continue; - } - - foreignKeyOids = lappend_oid(foreignKeyOids, constraintId); - - heapTuple = systable_getnext(scanDescriptor); } systable_endscan(scanDescriptor); diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index c41136176..e97312df2 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -493,6 +493,7 @@ GenerateCreateIndexDDLJob(IndexStmt *createIndexStatement, const char *createInd ddlJob->startNewTransaction = createIndexStatement->concurrent; ddlJob->metadataSyncCommand = createIndexCommand; ddlJob->taskList = CreateIndexTaskList(createIndexStatement); + ddlJob->warnForPartialFailure = true; return ddlJob; } @@ -652,6 +653,7 @@ PreprocessReindexStmt(Node *node, const char 
*reindexCommand, "concurrently"); ddlJob->metadataSyncCommand = reindexCommand; ddlJob->taskList = CreateReindexTaskList(relationId, reindexStatement); + ddlJob->warnForPartialFailure = true; ddlJobs = list_make1(ddlJob); } @@ -780,6 +782,7 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand, ddlJob->metadataSyncCommand = dropIndexCommand; ddlJob->taskList = DropIndexTaskList(distributedRelationId, distributedIndexId, dropIndexStatement); + ddlJob->warnForPartialFailure = true; ddlJobs = list_make1(ddlJob); } diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 0284ea64d..cb64ef7f5 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -2568,7 +2568,7 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu * Find the shard interval and id for the partition column value for * non-reference tables. * - * For reference table, this function blindly returns the tables single + * For reference table, and single shard distributed table this function blindly returns the tables single * shard. */ ShardInterval *shardInterval = FindShardInterval(partitionColumnValue, cacheEntry); @@ -2663,7 +2663,6 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest, CreateIntermediateResultsDirectory(); const int fileFlags = (O_CREAT | O_RDWR | O_TRUNC); - const int fileMode = (S_IRUSR | S_IWUSR); StringInfo filePath = makeStringInfo(); appendStringInfo(filePath, "%s_%ld", copyDest->colocatedIntermediateResultIdPrefix, @@ -2671,7 +2670,7 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest, const char *fileName = QueryResultFileName(filePath->data); shardState->fileDest = - FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags, fileMode)); + FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags)); CopyOutState localFileCopyOutState = shardState->copyOutState; bool isBinaryCopy = localFileCopyOutState->binary; diff --git a/src/backend/distributed/commands/non_main_db_distribute_object_ops.c b/src/backend/distributed/commands/non_main_db_distribute_object_ops.c new file mode 100644 index 000000000..b777936d3 --- /dev/null +++ b/src/backend/distributed/commands/non_main_db_distribute_object_ops.c @@ -0,0 +1,351 @@ +/*------------------------------------------------------------------------- + * + * non_main_db_distribute_object_ops.c + * + * Routines to support node-wide object management commands from non-main + * databases. + * + * RunPreprocessNonMainDBCommand and RunPostprocessNonMainDBCommand are + * the entrypoints for this module. These functions are called from + * utility_hook.c to support some of the node-wide object management + * commands from non-main databases. + * + * To add support for a new command type, one needs to define a new + * NonMainDbDistributeObjectOps object within OperationArray. Also, if + * the command requires marking or unmarking some objects as distributed, + * the necessary operations can be implemented in + * RunPreprocessNonMainDBCommand and RunPostprocessNonMainDBCommand. 
+ * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/xact.h" +#include "catalog/pg_authid_d.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" + +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/remote_transaction.h" + + +#define EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER \ + "SELECT citus_internal.execute_command_on_remote_nodes_as_user(%s, %s)" +#define START_MANAGEMENT_TRANSACTION \ + "SELECT citus_internal.start_management_transaction('%lu')" +#define MARK_OBJECT_DISTRIBUTED \ + "SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)" +#define UNMARK_OBJECT_DISTRIBUTED \ + "SELECT pg_catalog.citus_unmark_object_distributed(%d, %d, %d, %s)" + + +/* + * NonMainDbDistributeObjectOps contains the necessary callbacks / flags to + * support node-wide object management commands from non-main databases. + * + * cannotBeExecutedInTransaction: + * Indicates whether the statement cannot be executed in a transaction. If + * this is set to true, the statement will be executed directly on the main + * database because there are no transactional visibility issues for such + * commands. + * + * checkSupportedObjectType: + * Callback function that checks whether type of the object referred to by + * given statement is supported. Can be NULL if not applicable for the + * statement type. + */ +typedef struct NonMainDbDistributeObjectOps +{ + bool cannotBeExecutedInTransaction; + bool (*checkSupportedObjectType)(Node *parsetree); +} NonMainDbDistributeObjectOps; + + +/* + * checkSupportedObjectType callbacks for OperationArray. + */ +static bool CreateDbStmtCheckSupportedObjectType(Node *node); +static bool DropDbStmtCheckSupportedObjectType(Node *node); +static bool GrantStmtCheckSupportedObjectType(Node *node); +static bool SecLabelStmtCheckSupportedObjectType(Node *node); + +/* + * OperationArray that holds NonMainDbDistributeObjectOps for different command types. 
+ */ +static const NonMainDbDistributeObjectOps *const OperationArray[] = { + [T_CreateRoleStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = NULL + }, + [T_DropRoleStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = NULL + }, + [T_AlterRoleStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = NULL + }, + [T_GrantRoleStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = NULL + }, + [T_CreatedbStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = true, + .checkSupportedObjectType = CreateDbStmtCheckSupportedObjectType + }, + [T_DropdbStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = true, + .checkSupportedObjectType = DropDbStmtCheckSupportedObjectType + }, + [T_GrantStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = GrantStmtCheckSupportedObjectType + }, + [T_SecLabelStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = SecLabelStmtCheckSupportedObjectType + }, +}; + + +/* other static function declarations */ +const NonMainDbDistributeObjectOps * GetNonMainDbDistributeObjectOps(Node *parsetree); +static void CreateRoleStmtMarkDistGloballyOnMainDbs(CreateRoleStmt *createRoleStmt); +static void DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt); +static void MarkObjectDistributedGloballyOnMainDbs(Oid catalogRelId, Oid objectId, + char *objectName); +static void UnmarkObjectDistributedOnLocalMainDb(uint16 catalogRelId, Oid objectId); + + +/* + * RunPreprocessNonMainDBCommand runs the necessary commands for a query, in main + * database before query is run on the local node with PrevProcessUtility. + * + * Returns true if previous utility hook needs to be skipped after completing + * preprocess phase. + */ +bool +RunPreprocessNonMainDBCommand(Node *parsetree) +{ + if (IsMainDB) + { + return false; + } + + const NonMainDbDistributeObjectOps *ops = GetNonMainDbDistributeObjectOps(parsetree); + if (!ops) + { + return false; + } + + char *queryString = DeparseTreeNode(parsetree); + + /* + * For the commands that cannot be executed in a transaction, there are no + * transactional visibility issues. We directly route them to main database + * so that we only have to consider one code-path for such commands. + */ + if (ops->cannotBeExecutedInTransaction) + { + IsMainDBCommandInXact = false; + RunCitusMainDBQuery((char *) queryString); + return true; + } + + IsMainDBCommandInXact = true; + + StringInfo mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + START_MANAGEMENT_TRANSACTION, + GetCurrentFullTransactionId().value); + RunCitusMainDBQuery(mainDBQuery->data); + + mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER, + quote_literal_cstr(queryString), + quote_literal_cstr(CurrentUserName())); + RunCitusMainDBQuery(mainDBQuery->data); + + if (IsA(parsetree, DropRoleStmt)) + { + DropRoleStmtUnmarkDistOnLocalMainDb((DropRoleStmt *) parsetree); + } + + return false; +} + + +/* + * RunPostprocessNonMainDBCommand runs the necessary commands for a query, in main + * database after query is run on the local node with PrevProcessUtility. 
+ */ +void +RunPostprocessNonMainDBCommand(Node *parsetree) +{ + if (IsMainDB || !GetNonMainDbDistributeObjectOps(parsetree)) + { + return; + } + + if (IsA(parsetree, CreateRoleStmt)) + { + CreateRoleStmtMarkDistGloballyOnMainDbs((CreateRoleStmt *) parsetree); + } +} + + +/* + * GetNonMainDbDistributeObjectOps returns the NonMainDbDistributeObjectOps for given + * command if it's node-wide object management command that's supported from non-main + * databases. + */ +const NonMainDbDistributeObjectOps * +GetNonMainDbDistributeObjectOps(Node *parsetree) +{ + NodeTag tag = nodeTag(parsetree); + if (tag >= lengthof(OperationArray)) + { + return NULL; + } + + const NonMainDbDistributeObjectOps *ops = OperationArray[tag]; + + if (ops == NULL) + { + return NULL; + } + + if (!ops->checkSupportedObjectType || + ops->checkSupportedObjectType(parsetree)) + { + return ops; + } + + return NULL; +} + + +/* + * CreateRoleStmtMarkDistGloballyOnMainDbs marks the role as + * distributed on all main databases globally. + */ +static void +CreateRoleStmtMarkDistGloballyOnMainDbs(CreateRoleStmt *createRoleStmt) +{ + /* object must exist as we've just created it */ + bool missingOk = false; + Oid roleId = get_role_oid(createRoleStmt->role, missingOk); + + MarkObjectDistributedGloballyOnMainDbs(AuthIdRelationId, roleId, + createRoleStmt->role); +} + + +/* + * DropRoleStmtUnmarkDistOnLocalMainDb unmarks the roles as + * distributed on the local main database. + */ +static void +DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt) +{ + RoleSpec *roleSpec = NULL; + foreach_ptr(roleSpec, dropRoleStmt->roles) + { + Oid roleOid = get_role_oid(roleSpec->rolename, + dropRoleStmt->missing_ok); + if (roleOid == InvalidOid) + { + continue; + } + + UnmarkObjectDistributedOnLocalMainDb(AuthIdRelationId, roleOid); + } +} + + +/* + * MarkObjectDistributedGloballyOnMainDbs marks an object as + * distributed on all main databases globally. + */ +static void +MarkObjectDistributedGloballyOnMainDbs(Oid catalogRelId, Oid objectId, char *objectName) +{ + StringInfo mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + MARK_OBJECT_DISTRIBUTED, + catalogRelId, + quote_literal_cstr(objectName), + objectId, + quote_literal_cstr(CurrentUserName())); + RunCitusMainDBQuery(mainDBQuery->data); +} + + +/* + * UnmarkObjectDistributedOnLocalMainDb unmarks an object as + * distributed on the local main database. + */ +static void +UnmarkObjectDistributedOnLocalMainDb(uint16 catalogRelId, Oid objectId) +{ + const int subObjectId = 0; + const char *checkObjectExistence = "false"; + + StringInfo query = makeStringInfo(); + appendStringInfo(query, + UNMARK_OBJECT_DISTRIBUTED, + catalogRelId, objectId, + subObjectId, checkObjectExistence); + RunCitusMainDBQuery(query->data); +} + + +/* + * checkSupportedObjectTypes callbacks for OperationArray lie below. + */ +static bool +CreateDbStmtCheckSupportedObjectType(Node *node) +{ + /* + * We don't try to send the query to the main database if the CREATE + * DATABASE command is for the main database itself, this is a very + * rare case but it's exercised by our test suite. + */ + CreatedbStmt *stmt = castNode(CreatedbStmt, node); + return strcmp(stmt->dbname, MainDb) != 0; +} + + +static bool +DropDbStmtCheckSupportedObjectType(Node *node) +{ + /* + * We don't try to send the query to the main database if the DROP + * DATABASE command is for the main database itself, this is a very + * rare case but it's exercised by our test suite. 
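+	 * In that case the command is not routed through the main database at all
+	 * and is simply executed locally by the regular utility hook.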
+ */ + DropdbStmt *stmt = castNode(DropdbStmt, node); + return strcmp(stmt->dbname, MainDb) != 0; +} + + +static bool +GrantStmtCheckSupportedObjectType(Node *node) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + return stmt->objtype == OBJECT_DATABASE; +} + + +static bool +SecLabelStmtCheckSupportedObjectType(Node *node) +{ + SecLabelStmt *stmt = castNode(SecLabelStmt, node); + return stmt->objtype == OBJECT_ROLE; +} diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index d0b33ccb9..7f5f697f2 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -491,18 +491,17 @@ GenerateRoleOptionsList(HeapTuple tuple) options = lappend(options, makeDefElem("password", NULL, -1)); } - /* load valid unitl data from the heap tuple, use default of infinity if not set */ + /* load valid until data from the heap tuple */ Datum rolValidUntilDatum = SysCacheGetAttr(AUTHNAME, tuple, Anum_pg_authid_rolvaliduntil, &isNull); - char *rolValidUntil = "infinity"; if (!isNull) { - rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum)); - } + char *rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum)); - Node *validUntilStringNode = (Node *) makeString(rolValidUntil); - DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1); - options = lappend(options, validUntilOption); + Node *validUntilStringNode = (Node *) makeString(rolValidUntil); + DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1); + options = lappend(options, validUntilOption); + } return options; } @@ -886,6 +885,14 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) { Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple); + ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*roleAddress, AuthIdRelationId, membership->grantor); + if (!IsAnyObjectDistributed(list_make1(roleAddress))) + { + /* we only need to propagate the grant if the grantor is distributed */ + continue; + } + GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt); grantRoleStmt->is_grant = true; @@ -901,7 +908,11 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) granteeRole->rolename = GetUserNameFromId(membership->member, true); grantRoleStmt->grantee_roles = list_make1(granteeRole); - grantRoleStmt->grantor = NULL; + RoleSpec *grantorRole = makeNode(RoleSpec); + grantorRole->roletype = ROLESPEC_CSTRING; + grantorRole->location = -1; + grantorRole->rolename = GetUserNameFromId(membership->grantor, false); + grantRoleStmt->grantor = grantorRole; #if PG_VERSION_NUM >= PG_VERSION_16 @@ -1241,12 +1252,6 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, return NIL; } - /* - * Postgres don't seem to use the grantor. Even dropping the grantor doesn't - * seem to affect the membership. If this changes, we might need to add grantors - * to the dependency resolution too. For now we just don't propagate it. 
- */ - stmt->grantor = NULL; stmt->grantee_roles = distributedGranteeRoles; char *sql = DeparseTreeNode((Node *) stmt); stmt->grantee_roles = allGranteeRoles; diff --git a/src/backend/distributed/commands/seclabel.c b/src/backend/distributed/commands/seclabel.c index 3e1847dc9..1d274a056 100644 --- a/src/backend/distributed/commands/seclabel.c +++ b/src/backend/distributed/commands/seclabel.c @@ -29,7 +29,7 @@ List * PostprocessSecLabelStmt(Node *node, const char *queryString) { - if (!ShouldPropagate()) + if (!EnableAlterRolePropagation || !ShouldPropagate()) { return NIL; } @@ -59,21 +59,17 @@ PostprocessSecLabelStmt(Node *node, const char *queryString) return NIL; } - if (!EnableCreateRolePropagation) - { - return NIL; - } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses); - const char *sql = DeparseTreeNode((Node *) secLabelStmt); + const char *secLabelCommands = DeparseTreeNode((Node *) secLabelStmt); List *commandList = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, + (void *) secLabelCommands, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList); + return NodeDDLTaskList(REMOTE_NODES, commandList); } diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 4d838a882..cfb55faf7 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -14,6 +14,7 @@ #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/namespace.h" +#include "catalog/pg_attrdef.h" #include "commands/defrem.h" #include "commands/extension.h" #include "nodes/makefuncs.h" @@ -507,22 +508,14 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString, static Oid SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress, char depType) { - List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); - Oid citusTableId = InvalidOid; - foreach_oid(citusTableId, citusTableIdList) + Oid relationId; + List *relations = GetDependentRelationsWithSequence(sequenceAddress->objectId, + depType); + foreach_oid(relationId, relations) { - List *seqInfoList = NIL; - GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0, depType); - SequenceInfo *seqInfo = NULL; - foreach_ptr(seqInfo, seqInfoList) + if (IsCitusTable(relationId)) { - /* - * This sequence is used in a distributed table - */ - if (seqInfo->sequenceOid == sequenceAddress->objectId) - { - return citusTableId; - } + return relationId; } } diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 074a789ed..30b028b79 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -3053,11 +3053,15 @@ ErrorUnsupportedAlterTableAddColumn(Oid relationId, AlterTableCmd *command, else if (constraint->contype == CONSTR_FOREIGN) { RangeVar *referencedTable = constraint->pktable; - char *referencedColumn = strVal(lfirst(list_head(constraint->pk_attrs))); Oid referencedRelationId = RangeVarGetRelid(referencedTable, NoLock, false); - appendStringInfo(errHint, "FOREIGN KEY (%s) REFERENCES %s(%s)", colName, - get_rel_name(referencedRelationId), referencedColumn); + appendStringInfo(errHint, "FOREIGN KEY (%s) REFERENCES %s", colName, + get_rel_name(referencedRelationId)); + + if (list_length(constraint->pk_attrs) > 0) + { + AppendColumnNameList(errHint, constraint->pk_attrs); + } if (constraint->fk_del_action == FKCONSTR_ACTION_SETNULL) { diff 
--git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 68af4b7b5..9426e13c0 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -87,14 +87,6 @@ #include "distributed/worker_shard_visibility.h" #include "distributed/worker_transaction.h" -#define EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER \ - "SELECT citus_internal.execute_command_on_remote_nodes_as_user(%s, %s)" -#define START_MANAGEMENT_TRANSACTION \ - "SELECT citus_internal.start_management_transaction('%lu')" -#define MARK_OBJECT_DISTRIBUTED \ - "SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)" - - bool EnableDDLPropagation = true; /* ddl propagation is enabled */ int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE; PropSetCmdBehavior PropagateSetCommands = PROPSETCMD_NONE; /* SET prop off */ @@ -122,8 +114,7 @@ static void PostStandardProcessUtility(Node *parsetree); static void DecrementUtilityHookCountersIfNecessary(Node *parsetree); static bool IsDropSchemaOrDB(Node *parsetree); static bool ShouldCheckUndistributeCitusLocalTables(void); -static void RunPreprocessMainDBCommand(Node *parsetree, const char *queryString); -static void RunPostprocessMainDBCommand(Node *parsetree); + /* * ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of @@ -255,23 +246,27 @@ citus_ProcessUtility(PlannedStmt *pstmt, if (!CitusHasBeenLoaded()) { - if (!IsMainDB) - { - RunPreprocessMainDBCommand(parsetree, queryString); - } - /* - * Ensure that utility commands do not behave any differently until CREATE - * EXTENSION is invoked. + * Process the command via RunPreprocessNonMainDBCommand and + * RunPostprocessNonMainDBCommand hooks if we're in a non-main database + * and if the command is a node-wide object management command that we + * support from non-main databases. */ - PrevProcessUtility(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); - if (!IsMainDB) + bool shouldSkipPrevUtilityHook = RunPreprocessNonMainDBCommand(parsetree); + + if (!shouldSkipPrevUtilityHook) { - RunPostprocessMainDBCommand(parsetree); + /* + * Ensure that utility commands do not behave any differently until CREATE + * EXTENSION is invoked. 
+ */ + PrevProcessUtility(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); } + RunPostprocessNonMainDBCommand(parsetree); + return; } else if (IsA(parsetree, CallStmt)) @@ -738,6 +733,13 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt, errhint("Connect to other nodes directly to manually create all" " necessary users and roles."))); } + else if (IsA(parsetree, SecLabelStmt) && !EnableAlterRolePropagation) + { + ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands to other" + " nodes"), + errhint("Connect to other nodes directly to manually assign" + " necessary labels."))); + } /* * Make sure that on DROP EXTENSION we terminate the background daemon @@ -1287,7 +1289,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) errhint("Use DROP INDEX CONCURRENTLY IF EXISTS to remove the " "invalid index, then retry the original command."))); } - else + else if (ddlJob->warnForPartialFailure) { ereport(WARNING, (errmsg( @@ -1296,9 +1298,9 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) "state.\nIf the problematic command is a CREATE operation, " "consider using the 'IF EXISTS' syntax to drop the object," "\nif applicable, and then re-attempt the original command."))); - - PG_RE_THROW(); } + + PG_RE_THROW(); } PG_END_TRY(); } @@ -1514,9 +1516,12 @@ DDLTaskList(Oid relationId, const char *commandString) * NontransactionalNodeDDLTaskList builds a list of tasks to execute a DDL command on a * given target set of nodes with cannotBeExecutedInTransaction is set to make sure * that task list is executed outside a transaction block. + * + * Also sets warnForPartialFailure for the returned DDLJobs. */ List * -NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands) +NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands, + bool warnForPartialFailure) { List *ddlJobs = NodeDDLTaskList(targets, commands); DDLJob *ddlJob = NULL; @@ -1527,6 +1532,8 @@ NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands) { task->cannotBeExecutedInTransaction = true; } + + ddlJob->warnForPartialFailure = warnForPartialFailure; } return ddlJobs; } @@ -1594,50 +1601,3 @@ DropSchemaOrDBInProgress(void) { return activeDropSchemaOrDBs > 0; } - - -/* - * RunPreprocessMainDBCommand runs the necessary commands for a query, in main - * database before query is run on the local node with PrevProcessUtility - */ -static void -RunPreprocessMainDBCommand(Node *parsetree, const char *queryString) -{ - if (IsA(parsetree, CreateRoleStmt)) - { - StringInfo mainDBQuery = makeStringInfo(); - appendStringInfo(mainDBQuery, - START_MANAGEMENT_TRANSACTION, - GetCurrentFullTransactionId().value); - RunCitusMainDBQuery(mainDBQuery->data); - mainDBQuery = makeStringInfo(); - appendStringInfo(mainDBQuery, - EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER, - quote_literal_cstr(queryString), - quote_literal_cstr(CurrentUserName())); - RunCitusMainDBQuery(mainDBQuery->data); - } -} - - -/* - * RunPostprocessMainDBCommand runs the necessary commands for a query, in main - * database after query is run on the local node with PrevProcessUtility - */ -static void -RunPostprocessMainDBCommand(Node *parsetree) -{ - if (IsA(parsetree, CreateRoleStmt)) - { - StringInfo mainDBQuery = makeStringInfo(); - CreateRoleStmt *createRoleStmt = castNode(CreateRoleStmt, parsetree); - Oid roleOid = get_role_oid(createRoleStmt->role, false); - appendStringInfo(mainDBQuery, - MARK_OBJECT_DISTRIBUTED, - AuthIdRelationId, - quote_literal_cstr(createRoleStmt->role), - roleOid, - 
quote_literal_cstr(CurrentUserName())); - RunCitusMainDBQuery(mainDBQuery->data); - } -} diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c index ac82d4e09..3913173e2 100644 --- a/src/backend/distributed/connection/connection_configuration.c +++ b/src/backend/distributed/connection/connection_configuration.c @@ -271,9 +271,24 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, * We allocate everything in the provided context so as to facilitate using * pfree on all runtime parameters when connections using these entries are * invalidated during config reloads. + * + * Also, when "host" is already provided in global parameters, we use hostname + * from the key as "hostaddr" instead of "host" to avoid host name lookup. In + * that case, the value for "host" becomes useful only if the authentication + * method requires it. */ + bool gotHostParamFromGlobalParams = false; + for (Size paramIndex = 0; paramIndex < ConnParams.size; paramIndex++) + { + if (strcmp(ConnParams.keywords[paramIndex], "host") == 0) + { + gotHostParamFromGlobalParams = true; + break; + } + } + const char *runtimeKeywords[] = { - "host", + gotHostParamFromGlobalParams ? "hostaddr" : "host", "port", "dbname", "user", diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index 4b46e96d2..cbd74ff51 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -883,7 +883,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) palloc(totalConnectionCount * sizeof(MultiConnection *)); WaitEvent *events = palloc(totalConnectionCount * sizeof(WaitEvent)); bool *connectionReady = palloc(totalConnectionCount * sizeof(bool)); - WaitEventSet *waitEventSet = NULL; + WaitEventSet *volatile waitEventSet = NULL; /* convert connection list to an array such that we can move items around */ MultiConnection *connectionItem = NULL; diff --git a/src/backend/distributed/deparser/citus_grantutils.c b/src/backend/distributed/deparser/citus_grantutils.c index c944013f6..8354e0479 100644 --- a/src/backend/distributed/deparser/citus_grantutils.c +++ b/src/backend/distributed/deparser/citus_grantutils.c @@ -74,7 +74,7 @@ AppendGrantRestrictAndCascade(StringInfo buf, GrantStmt *stmt) void AppendGrantedByInGrantForRoleSpec(StringInfo buf, RoleSpec *grantor, bool isGrant) { - if (isGrant && grantor) + if (grantor) { appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(grantor, true)); } diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c index 30ac3f32c..66df5361e 100644 --- a/src/backend/distributed/deparser/deparse_database_stmts.c +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -277,6 +277,11 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt) /* * Make sure that we don't try to deparse something that this * function doesn't expect. + * + * This is also useful to throw an error for unsupported CREATE + * DATABASE options when the command is issued from non-main dbs + * because we use the same function to deparse CREATE DATABASE + * commands there too. 
*/ EnsureSupportedCreateDatabaseCommand(stmt); diff --git a/src/backend/distributed/deparser/deparse_role_stmts.c b/src/backend/distributed/deparser/deparse_role_stmts.c index b86841345..a4a085026 100644 --- a/src/backend/distributed/deparser/deparse_role_stmts.c +++ b/src/backend/distributed/deparser/deparse_role_stmts.c @@ -488,7 +488,6 @@ AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt) AppendGrantWithAdminOption(buf, stmt); AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant); AppendGrantRestrictAndCascadeForRoleSpec(buf, stmt->behavior, stmt->is_grant); - AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant); appendStringInfo(buf, ";"); } diff --git a/src/backend/distributed/deparser/deparse_table_stmts.c b/src/backend/distributed/deparser/deparse_table_stmts.c index e976b0e2f..5d184fa66 100644 --- a/src/backend/distributed/deparser/deparse_table_stmts.c +++ b/src/backend/distributed/deparser/deparse_table_stmts.c @@ -121,7 +121,7 @@ AppendAlterTableStmt(StringInfo buf, AlterTableStmt *stmt) * AppendColumnNameList converts a list of columns into comma separated string format * (colname_1, colname_2, .., colname_n). */ -static void +void AppendColumnNameList(StringInfo buf, List *columns) { appendStringInfoString(buf, " ("); diff --git a/src/backend/distributed/executor/intermediate_results.c b/src/backend/distributed/executor/intermediate_results.c index 0e18d4416..daf707b24 100644 --- a/src/backend/distributed/executor/intermediate_results.c +++ b/src/backend/distributed/executor/intermediate_results.c @@ -295,7 +295,6 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) if (resultDest->writeLocalFile) { const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); /* make sure the directory exists */ CreateIntermediateResultsDirectory(); @@ -303,8 +302,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) const char *fileName = QueryResultFileName(resultId); resultDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(fileName, - fileFlags, - fileMode)); + fileFlags)); } WorkerNode *workerNode = NULL; @@ -606,7 +604,7 @@ CreateIntermediateResultsDirectory(void) { char *resultDirectory = IntermediateResultsDirectory(); - int makeOK = mkdir(resultDirectory, S_IRWXU); + int makeOK = MakePGDirectory(resultDirectory); if (makeOK != 0) { if (errno == EEXIST) @@ -976,7 +974,6 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId) StringInfo copyCommand = makeStringInfo(); const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); PGconn *pgConn = connection->pgConn; int socket = PQsocket(pgConn); @@ -998,7 +995,7 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId) PQclear(result); - File fileDesc = FileOpenForTransmit(localPath, fileFlags, fileMode); + File fileDesc = FileOpenForTransmit(localPath, fileFlags); FileCompat fileCompat = FileCompatFromFileStart(fileDesc); while (true) diff --git a/src/backend/distributed/executor/transmit.c b/src/backend/distributed/executor/transmit.c index a10ae4fbf..224d8e589 100644 --- a/src/backend/distributed/executor/transmit.c +++ b/src/backend/distributed/executor/transmit.c @@ -17,6 +17,7 @@ #include "pgstat.h" #include "commands/defrem.h" +#include "common/file_perm.h" #include "libpq/libpq.h" #include "libpq/pqformat.h" #include "storage/fd.h" @@ -48,8 +49,7 @@ 
RedirectCopyDataToRegularFile(const char *filename) { StringInfo copyData = makeStringInfo(); const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); - File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode); + File fileDesc = FileOpenForTransmit(filename, fileFlags); FileCompat fileCompat = FileCompatFromFileStart(fileDesc); SendCopyInStart(); @@ -92,7 +92,7 @@ SendRegularFile(const char *filename) const int fileMode = 0; /* we currently do not check if the caller has permissions for this file */ - File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode); + File fileDesc = FileOpenForTransmitPerm(filename, fileFlags, fileMode); FileCompat fileCompat = FileCompatFromFileStart(fileDesc); /* @@ -136,12 +136,23 @@ FreeStringInfo(StringInfo stringInfo) /* - * FileOpenForTransmit opens file with the given filename and flags. On success, - * the function returns the internal file handle for the opened file. On failure - * the function errors out. + * Open a file with FileOpenForTransmitPerm() and pass default file mode for + * the fileMode parameter. */ File -FileOpenForTransmit(const char *filename, int fileFlags, int fileMode) +FileOpenForTransmit(const char *filename, int fileFlags) +{ + return FileOpenForTransmitPerm(filename, fileFlags, pg_file_create_mode); +} + + +/* + * FileOpenForTransmitPerm opens file with the given filename and flags. On + * success, the function returns the internal file handle for the opened file. + * On failure the function errors out. + */ +File +FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode) { struct stat fileStat; diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index 007d07bdc..ff5b2c7a9 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -98,10 +98,10 @@ mark_object_distributed(PG_FUNCTION_ARGS) /* - * citus_unmark_object_distributed(classid oid, objid oid, objsubid int) + * citus_unmark_object_distributed(classid oid, objid oid, objsubid int,checkobjectexistence bool) * - * removes the entry for an object address from pg_dist_object. Only removes the entry if - * the object does not exist anymore. + * Removes the entry for an object address from pg_dist_object. If checkobjectexistence is true, + * throws an error if the object still exists. 
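+ *
+ * For instance (illustrative values), the following removes the pg_dist_object
+ * entry for the role with OID 123456 without checking whether the role still
+ * exists:
+ *
+ *   SELECT pg_catalog.citus_unmark_object_distributed(1260, 123456, 0, false);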
*/ Datum citus_unmark_object_distributed(PG_FUNCTION_ARGS) @@ -109,6 +109,12 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS) Oid classid = PG_GETARG_OID(0); Oid objid = PG_GETARG_OID(1); int32 objsubid = PG_GETARG_INT32(2); + bool checkObjectExistence = true; + if (!PG_ARGISNULL(3)) + { + checkObjectExistence = PG_GETARG_BOOL(3); + } + ObjectAddress address = { 0 }; ObjectAddressSubSet(address, classid, objid, objsubid); @@ -119,7 +125,7 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } - if (ObjectExists(&address)) + if (checkObjectExistence && ObjectExists(&address)) { ereport(ERROR, (errmsg("object still exists"), errdetail("the %s \"%s\" still exists", diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 402dedb8a..4f1b942a0 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -2522,6 +2522,8 @@ AvailableExtensionVersion(void) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("citus extension is not found"))); + + return NULL; /* keep compiler happy */ } diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 2462a0915..ef7c56dc7 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -492,19 +492,7 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS) bool ClusterHasKnownMetadataWorkers() { - bool workerWithMetadata = false; - - if (!IsCoordinator()) - { - workerWithMetadata = true; - } - - if (workerWithMetadata || HasMetadataWorkers()) - { - return true; - } - - return false; + return !IsCoordinator() || HasMetadataWorkers(); } @@ -1176,7 +1164,7 @@ DistributionDeleteMetadataCommand(Oid relationId) char *qualifiedRelationName = generate_qualified_relation_name(relationId); appendStringInfo(deleteCommand, - "SELECT pg_catalog.citus_internal_delete_partition_metadata(%s)", + "SELECT citus_internal.delete_partition_metadata(%s)", quote_literal_cstr(qualifiedRelationName)); return deleteCommand->data; @@ -1259,7 +1247,7 @@ ShardListInsertCommand(List *shardIntervalList) appendStringInfo(insertPlacementCommand, ") "); appendStringInfo(insertPlacementCommand, - "SELECT citus_internal_add_placement_metadata(" + "SELECT citus_internal.add_placement_metadata(" "shardid, shardlength, groupid, placementid) " "FROM placement_data;"); @@ -1315,7 +1303,7 @@ ShardListInsertCommand(List *shardIntervalList) appendStringInfo(insertShardCommand, ") "); appendStringInfo(insertShardCommand, - "SELECT citus_internal_add_shard_metadata(relationname, shardid, " + "SELECT citus_internal.add_shard_metadata(relationname, shardid, " "storagetype, shardminvalue, shardmaxvalue) " "FROM shard_data;"); @@ -1354,7 +1342,7 @@ ShardDeleteCommandList(ShardInterval *shardInterval) StringInfo deleteShardCommand = makeStringInfo(); appendStringInfo(deleteShardCommand, - "SELECT citus_internal_delete_shard_metadata(%ld);", shardId); + "SELECT citus_internal.delete_shard_metadata(%ld);", shardId); return list_make1(deleteShardCommand->data); } @@ -1424,7 +1412,7 @@ ColocationIdUpdateCommand(Oid relationId, uint32 colocationId) StringInfo command = makeStringInfo(); char *qualifiedRelationName = generate_qualified_relation_name(relationId); appendStringInfo(command, - "SELECT citus_internal_update_relation_colocation(%s::regclass, %d)", + "SELECT citus_internal.update_relation_colocation(%s::regclass, %d)", 
quote_literal_cstr(qualifiedRelationName), colocationId); return command->data; @@ -1649,6 +1637,74 @@ GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList, } +/* + * GetDependentDependentRelationsWithSequence returns a list of oids of + * relations that have have a dependency on the given sequence. + * There are three types of dependencies: + * 1. direct auto (owned sequences), created using SERIAL or BIGSERIAL + * 2. indirect auto (through an AttrDef), created using DEFAULT nextval('..') + * 3. internal, created using GENERATED ALWAYS AS IDENTITY + * + * Depending on the passed deptype, we return the relations that have the + * given type(s): + * - DEPENDENCY_AUTO returns both 1 and 2 + * - DEPENDENCY_INTERNAL returns 3 + * + * The returned list can contain duplicates, as the same relation can have + * multiple dependencies on the sequence. + */ +List * +GetDependentRelationsWithSequence(Oid sequenceOid, char depType) +{ + List *relations = NIL; + ScanKeyData key[2]; + HeapTuple tup; + + Relation depRel = table_open(DependRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_depend_classid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], + Anum_pg_depend_objid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(sequenceOid)); + SysScanDesc scan = systable_beginscan(depRel, DependDependerIndexId, true, + NULL, lengthof(key), key); + while (HeapTupleIsValid(tup = systable_getnext(scan))) + { + Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup); + + if ( + deprec->refclassid == RelationRelationId && + deprec->refobjsubid != 0 && + deprec->deptype == depType) + { + relations = lappend_oid(relations, deprec->refobjid); + } + } + + systable_endscan(scan); + + table_close(depRel, AccessShareLock); + + if (depType == DEPENDENCY_AUTO) + { + Oid attrDefOid; + List *attrDefOids = GetAttrDefsFromSequence(sequenceOid); + + foreach_oid(attrDefOid, attrDefOids) + { + ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid); + relations = lappend_oid(relations, columnAddress.objectId); + } + } + + return relations; +} + + /* * GetSequencesFromAttrDef returns a list of sequence OIDs that have * dependency with the given attrdefOid in pg_depend @@ -1694,6 +1750,90 @@ GetSequencesFromAttrDef(Oid attrdefOid) } +#if PG_VERSION_NUM < PG_VERSION_15 + +/* + * Given a pg_attrdef OID, return the relation OID and column number of + * the owning column (represented as an ObjectAddress for convenience). + * + * Returns InvalidObjectAddress if there is no such pg_attrdef entry. 
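+ *
+ * PostgreSQL 15 and later provide this helper themselves, which is why this
+ * backport is compiled only when building against older server versions
+ * (see the PG_VERSION_NUM guard above).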
+ */ +ObjectAddress +GetAttrDefaultColumnAddress(Oid attrdefoid) +{ + ObjectAddress result = InvalidObjectAddress; + ScanKeyData skey[1]; + HeapTuple tup; + + Relation attrdef = table_open(AttrDefaultRelationId, AccessShareLock); + ScanKeyInit(&skey[0], + Anum_pg_attrdef_oid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(attrdefoid)); + SysScanDesc scan = systable_beginscan(attrdef, AttrDefaultOidIndexId, true, + NULL, 1, skey); + + if (HeapTupleIsValid(tup = systable_getnext(scan))) + { + Form_pg_attrdef atdform = (Form_pg_attrdef) GETSTRUCT(tup); + + result.classId = RelationRelationId; + result.objectId = atdform->adrelid; + result.objectSubId = atdform->adnum; + } + + systable_endscan(scan); + table_close(attrdef, AccessShareLock); + + return result; +} + + +#endif + + +/* + * GetAttrDefsFromSequence returns a list of attrdef OIDs that have + * a dependency on the given sequence + */ +List * +GetAttrDefsFromSequence(Oid seqOid) +{ + List *attrDefsResult = NIL; + ScanKeyData key[2]; + HeapTuple tup; + + Relation depRel = table_open(DependRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_depend_refclassid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], + Anum_pg_depend_refobjid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(seqOid)); + SysScanDesc scan = systable_beginscan(depRel, DependReferenceIndexId, true, + NULL, lengthof(key), key); + while (HeapTupleIsValid(tup = systable_getnext(scan))) + { + Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup); + + if (deprec->classid == AttrDefaultRelationId && + deprec->deptype == DEPENDENCY_NORMAL) + { + attrDefsResult = lappend_oid(attrDefsResult, deprec->objid); + } + } + + systable_endscan(scan); + + table_close(depRel, AccessShareLock); + + return attrDefsResult; +} + + /* * GetDependentFunctionsWithRelation returns the dependent functions for the * given relation id. 
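To make the sequence-dependency shapes above concrete, the following minimal sketch combines the new helper for both dependency types to collect every relation that uses a given sequence, whether it was attached via SERIAL/BIGSERIAL, via DEFAULT nextval('..'), or via GENERATED ALWAYS AS IDENTITY. The helper name AllRelationsUsingSequence and the distributed/metadata_sync.h include are assumptions made for illustration.

#include "postgres.h"

#include "catalog/dependency.h"
#include "nodes/pg_list.h"

#include "distributed/metadata_sync.h"

/* illustrative sketch only */
static List *
AllRelationsUsingSequence(Oid sequenceOid)
{
	/* SERIAL/BIGSERIAL and DEFAULT nextval('..') show up as auto dependencies */
	List *relations = GetDependentRelationsWithSequence(sequenceOid, DEPENDENCY_AUTO);

	/* GENERATED ALWAYS AS IDENTITY columns show up as internal dependencies */
	List *identityRelations = GetDependentRelationsWithSequence(sequenceOid,
	                                                            DEPENDENCY_INTERNAL);

	/* the combined list may contain duplicates, as noted in the function comment */
	return list_concat(relations, identityRelations);
}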
@@ -4056,7 +4196,7 @@ citus_internal_database_command(PG_FUNCTION_ARGS) } else { - ereport(ERROR, (errmsg("citus_internal_database_command() can only be used " + ereport(ERROR, (errmsg("citus_internal.database_command() can only be used " "for CREATE DATABASE command by Citus."))); } @@ -4209,7 +4349,7 @@ ColocationGroupDeleteCommand(uint32 colocationId) StringInfo deleteColocationCommand = makeStringInfo(); appendStringInfo(deleteColocationCommand, - "SELECT pg_catalog.citus_internal_delete_colocation_metadata(%d)", + "SELECT citus_internal.delete_colocation_metadata(%d)", colocationId); return deleteColocationCommand->data; @@ -4225,7 +4365,7 @@ TenantSchemaInsertCommand(Oid schemaId, uint32 colocationId) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)", + "SELECT citus_internal.add_tenant_schema(%s, %u)", RemoteSchemaIdExpressionById(schemaId), colocationId); return command->data; @@ -4241,7 +4381,7 @@ TenantSchemaDeleteCommand(char *schemaName) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_delete_tenant_schema(%s)", + "SELECT citus_internal.delete_tenant_schema(%s)", RemoteSchemaIdExpressionByName(schemaName)); return command->data; @@ -4258,7 +4398,7 @@ UpdateNoneDistTableMetadataCommand(Oid relationId, char replicationModel, { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(%s, '%c', %u, %s)", + "SELECT citus_internal.update_none_dist_table_metadata(%s, '%c', %u, %s)", RemoteTableIdExpression(relationId), replicationModel, colocationId, autoConverted ? "true" : "false"); @@ -4276,7 +4416,7 @@ AddPlacementMetadataCommand(uint64 shardId, uint64 placementId, { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT citus_internal_add_placement_metadata(%ld, %ld, %d, %ld)", + "SELECT citus_internal.add_placement_metadata(%ld, %ld, %d, %ld)", shardId, shardLength, groupId, placementId); return command->data; } @@ -4291,7 +4431,7 @@ DeletePlacementMetadataCommand(uint64 placementId) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_delete_placement_metadata(%ld)", + "SELECT citus_internal.delete_placement_metadata(%ld)", placementId); return command->data; } @@ -4957,7 +5097,7 @@ SendTenantSchemaMetadataCommands(MetadataSyncContext *context) StringInfo insertTenantSchemaCommand = makeStringInfo(); appendStringInfo(insertTenantSchemaCommand, - "SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)", + "SELECT citus_internal.add_tenant_schema(%s, %u)", RemoteSchemaIdExpressionById(tenantSchemaForm->schemaid), tenantSchemaForm->colocationid); diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index 094986c85..d92205943 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -217,6 +217,9 @@ citus_set_coordinator_host(PG_FUNCTION_ARGS) EnsureTransactionalMetadataSyncMode(); } + /* prevent concurrent modification */ + LockRelationOid(DistNodeRelationId(), RowExclusiveLock); + bool isCoordinatorInMetadata = false; WorkerNode *coordinatorNode = PrimaryNodeForGroup(COORDINATOR_GROUP_ID, &isCoordinatorInMetadata); @@ -507,7 +510,13 @@ citus_disable_node(PG_FUNCTION_ARGS) { text *nodeNameText = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); - bool synchronousDisableNode 
= PG_GETARG_BOOL(2); + + bool synchronousDisableNode = 1; + Assert(PG_NARGS() == 2 || PG_NARGS() == 3); + if (PG_NARGS() == 3) + { + synchronousDisableNode = PG_GETARG_BOOL(2); + } char *nodeName = text_to_cstring(nodeNameText); WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort); @@ -1692,7 +1701,7 @@ EnsureParentSessionHasExclusiveLockOnPgDistNode(pid_t parentSessionPid) if (!parentHasExclusiveLock) { ereport(ERROR, (errmsg("lock is not held by the caller. Unexpected caller " - "for citus_internal_mark_node_not_synced"))); + "for citus_internal.mark_node_not_synced"))); } } diff --git a/src/backend/distributed/operations/delete_protocol.c b/src/backend/distributed/operations/delete_protocol.c index c36121b00..396517158 100644 --- a/src/backend/distributed/operations/delete_protocol.c +++ b/src/backend/distributed/operations/delete_protocol.c @@ -426,10 +426,9 @@ ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement, errdetail("Marking this shard placement for " "deletion"))); - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - shardRelationName, - shardPlacement->groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + shardRelationName, + shardPlacement->groupId); return; } diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c index 790414530..2efce9a7b 100644 --- a/src/backend/distributed/operations/shard_cleaner.c +++ b/src/backend/distributed/operations/shard_cleaner.c @@ -92,6 +92,8 @@ static bool TryDropReplicationSlotOutsideTransaction(char *replicationSlotName, char *nodeName, int nodePort); static bool TryDropUserOutsideTransaction(char *username, char *nodeName, int nodePort); +static bool TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, + int nodePort); static CleanupRecord * GetCleanupRecordByNameAndType(char *objectName, CleanupObject type); @@ -141,7 +143,6 @@ Datum citus_cleanup_orphaned_resources(PG_FUNCTION_ARGS) { CheckCitusVersion(ERROR); - EnsureCoordinator(); PreventInTransactionBlock(true, "citus_cleanup_orphaned_resources"); int droppedCount = DropOrphanedResourcesForCleanup(); @@ -245,12 +246,6 @@ TryDropOrphanedResources() static int DropOrphanedResourcesForCleanup() { - /* Only runs on Coordinator */ - if (!IsCoordinator()) - { - return 0; - } - List *cleanupRecordList = ListCleanupRecords(); /* @@ -452,15 +447,15 @@ CompareCleanupRecordsByObjectType(const void *leftElement, const void *rightElem /* - * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry + * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry * as part of the current transaction. This is primarily useful for deferred drop scenarios, - * since these records would roll back in case of operation failure. + * since these records would roll back in case of operation failure. And for the same reason, + * always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS. */ void -InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy) +InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId) { /* We must have a valid OperationId. Any operation requring cleanup * will call RegisterOperationNeedingCleanup. 
@@ -482,7 +477,8 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, values[Anum_pg_dist_cleanup_object_type - 1] = Int32GetDatum(objectType); values[Anum_pg_dist_cleanup_object_name - 1] = CStringGetTextDatum(objectName); values[Anum_pg_dist_cleanup_node_group_id - 1] = Int32GetDatum(nodeGroupId); - values[Anum_pg_dist_cleanup_policy_type - 1] = Int32GetDatum(policy); + values[Anum_pg_dist_cleanup_policy_type - 1] = + Int32GetDatum(CLEANUP_DEFERRED_ON_SUCCESS); /* open cleanup relation and insert new tuple */ Oid relationId = DistCleanupRelationId(); @@ -499,23 +495,27 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, /* - * InsertCleanupRecordInSubtransaction inserts a new pg_dist_cleanup entry in a + * InsertCleanupRecordOutsideTransaction inserts a new pg_dist_cleanup entry in a * separate transaction to ensure the record persists after rollback. We should * delete these records if the operation completes successfully. * - * For failure scenarios, use a subtransaction (direct insert via localhost). + * This is used in scenarios where we need to cleanup resources on operation + * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE). */ void -InsertCleanupRecordInSubtransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy) +InsertCleanupRecordOutsideTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId, + CleanupPolicy policy) { /* We must have a valid OperationId. Any operation requring cleanup * will call RegisterOperationNeedingCleanup. */ Assert(CurrentOperationId != INVALID_OPERATION_ID); + /* assert the circumstance noted in function comment */ + Assert(policy == CLEANUP_ALWAYS || policy == CLEANUP_ON_FAILURE); + StringInfo sequenceName = makeStringInfo(); appendStringInfo(sequenceName, "%s.%s", PG_CATALOG, @@ -603,6 +603,12 @@ TryDropResourceByCleanupRecordOutsideTransaction(CleanupRecord *record, return TryDropUserOutsideTransaction(record->objectName, nodeName, nodePort); } + case CLEANUP_OBJECT_DATABASE: + { + return TryDropDatabaseOutsideTransaction(record->objectName, nodeName, + nodePort); + } + default: { ereport(WARNING, (errmsg( @@ -883,6 +889,69 @@ TryDropUserOutsideTransaction(char *username, } +/* + * TryDropDatabaseOutsideTransaction drops the database with the given name + * if it exists. + */ +static bool +TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, int nodePort) +{ + int connectionFlags = (OUTSIDE_TRANSACTION | FORCE_NEW_CONNECTION); + MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + CitusExtensionOwnerName(), + NULL); + + if (PQstatus(connection->pgConn) != CONNECTION_OK) + { + return false; + } + + /* + * We want to disable DDL propagation and set lock_timeout before issuing + * the DROP DATABASE command but we cannot do so in a way that's scoped + * to the DROP DATABASE command. This is because, we cannot use a + * transaction block for the DROP DATABASE command. + * + * For this reason, to avoid leaking the lock_timeout and DDL propagation + * settings to future commands, we force the connection to close at the end + * of the transaction. + */ + ForceConnectionCloseAtTransactionEnd(connection); + + /* + * The DROP DATABASE command should not propagate, so we disable DDL + * propagation. 
+ */ + List *commandList = list_make3( + "SET lock_timeout TO '1s'", + "SET citus.enable_ddl_propagation TO OFF;", + psprintf("DROP DATABASE IF EXISTS %s;", quote_identifier(databaseName)) + ); + + bool executeCommand = true; + + const char *commandString = NULL; + foreach_ptr(commandString, commandList) + { + /* + * Cannot use SendOptionalCommandListToWorkerOutsideTransactionWithConnection() + * because we don't want to open a transaction block on remote nodes as DROP + * DATABASE commands cannot be run inside a transaction block. + */ + if (ExecuteOptionalRemoteCommand(connection, commandString, NULL) != + RESPONSE_OKAY) + { + executeCommand = false; + break; + } + } + + CloseConnection(connection); + return executeCommand; +} + + /* * ErrorIfCleanupRecordForShardExists errors out if a cleanup record for the given * shard name exists. diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index d1868d3c4..03dc4c1b8 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -384,6 +384,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state) Assert(shardCost->cost <= prevShardCost->cost); } totalCost += shardCost->cost; + prevShardCost = shardCost; } /* Check that utilization field is up to date. */ diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c index cf9f301b7..4baf0fb24 100644 --- a/src/backend/distributed/operations/shard_split.c +++ b/src/backend/distributed/operations/shard_split.c @@ -733,11 +733,11 @@ CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList, workerPlacementNode->workerPort))); } - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - workerPlacementNode->groupId, - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + workerPlacementNode->groupId, + CLEANUP_ON_FAILURE); /* Create new split child shard on the specified placement list */ CreateObjectOnPlacement(splitShardCreationCommandList, @@ -1314,7 +1314,7 @@ DropShardListMetadata(List *shardIntervalList) { ListCell *commandCell = NULL; - /* send the commands one by one (calls citus_internal_delete_shard_metadata internally) */ + /* send the commands one by one (calls citus_internal.delete_shard_metadata internally) */ List *shardMetadataDeleteCommandList = ShardDeleteCommandList(shardInterval); foreach(commandCell, shardMetadataDeleteCommandList) { @@ -1717,11 +1717,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, /* Log shard in pg_dist_cleanup. Given dummy shards are transient resources, * we want to cleanup irrespective of operation success or failure. */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - workerPlacementNode->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + workerPlacementNode->groupId, + CLEANUP_ALWAYS); /* Create dummy source shard on the specified placement list */ CreateObjectOnPlacement(splitShardCreationCommandList, @@ -1780,11 +1780,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, /* Log shard in pg_dist_cleanup. 
Given dummy shards are transient resources, * we want to cleanup irrespective of operation success or failure. */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - sourceWorkerNode->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + sourceWorkerNode->groupId, + CLEANUP_ALWAYS); /* Create dummy split child shard on source worker node */ CreateObjectOnPlacement(splitShardCreationCommandList, sourceWorkerNode); diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c index 805ef39d7..737086752 100644 --- a/src/backend/distributed/operations/shard_transfer.c +++ b/src/backend/distributed/operations/shard_transfer.c @@ -294,6 +294,17 @@ citus_move_shard_placement(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); EnsureCoordinator(); + List *referenceTableIdList = NIL; + + if (HasNodesWithMissingReferenceTables(&referenceTableIdList)) + { + ereport(ERROR, (errmsg("there are missing reference tables on some nodes"), + errhint("Copy reference tables first with " + "replicate_reference_tables() or use " + "citus_rebalance_start() that will do it automatically." + ))); + } + int64 shardId = PG_GETARG_INT64(0); char *sourceNodeName = text_to_cstring(PG_GETARG_TEXT_P(1)); int32 sourceNodePort = PG_GETARG_INT32(2); @@ -593,10 +604,10 @@ InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList) * We also log cleanup record in the current transaction. If the current transaction rolls back, * we do not generate a record at all. */ - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - qualifiedShardName, - placement->groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + InsertCleanupOnSuccessRecordInCurrentTransaction( + CLEANUP_OBJECT_SHARD_PLACEMENT, + qualifiedShardName, + placement->groupId); } } } @@ -623,10 +634,9 @@ InsertCleanupRecordsForShardPlacementsOnNode(List *shardIntervalList, * We also log cleanup record in the current transaction. If the current transaction rolls back, * we do not generate a record at all. 
*/ - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - qualifiedShardName, - groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + qualifiedShardName, + groupId); } } @@ -1382,10 +1392,11 @@ CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeNa char *tableOwner = TableOwner(shardInterval->relationId); /* drop the shard we created on the target, in case of failure */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName(shardInterval), - GroupForNode(targetNodeName, targetNodePort), - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName(shardInterval), + GroupForNode(targetNodeName, + targetNodePort), + CLEANUP_ON_FAILURE); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, @@ -1455,10 +1466,11 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, char *tableOwner = TableOwner(shardInterval->relationId); /* drop the shard we created on the target, in case of failure */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName(shardInterval), - GroupForNode(targetNodeName, targetNodePort), - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName(shardInterval), + GroupForNode(targetNodeName, + targetNodePort), + CLEANUP_ON_FAILURE); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, ddlCommandList); @@ -2035,7 +2047,7 @@ UpdateColocatedShardPlacementMetadataOnWorkers(int64 shardId, StringInfo updateCommand = makeStringInfo(); appendStringInfo(updateCommand, - "SELECT citus_internal_update_placement_metadata(%ld, %d, %d)", + "SELECT citus_internal.update_placement_metadata(%ld, %d, %d)", colocatedShard->shardId, sourceGroupId, targetGroupId); SendCommandToWorkersWithMetadata(updateCommand->data); diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index f17b02347..4a79dc25a 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -91,6 +91,10 @@ bool InDelegatedFunctionCall = false; static bool contain_param_walker(Node *node, void *context) { + if (node == NULL) + { + return false; + } if (IsA(node, Param)) { Param *paramNode = (Param *) node; diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 60d6ce466..155880253 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -1810,6 +1810,8 @@ CastExpr(Expr *expr, Oid sourceType, Oid targetType, Oid targetCollation, ereport(ERROR, (errmsg("could not find a conversion path from type %d to %d", sourceType, targetType))); } + + return NULL; /* keep compiler happy */ } diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index 4d64b8f56..f8a181546 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -182,14 +182,6 @@ CreateRouterMergePlan(Oid targetRelationId, Query *originalQuery, Query *query, return distributedPlan; } - Var *insertVar = - 
FetchAndValidateInsertVarIfExists(targetRelationId, originalQuery); - if (insertVar && - !IsDistributionColumnInMergeSource((Expr *) insertVar, originalQuery, true)) - { - ereport(ERROR, (errmsg("MERGE INSERT must use the source table " - "distribution column value"))); - } Job *job = RouterJob(originalQuery, plannerRestrictionContext, &distributedPlan->planningError); @@ -251,14 +243,27 @@ CreateNonPushableMergePlan(Oid targetRelationId, uint64 planId, Query *originalQ CitusTableCacheEntry *targetRelation = GetCitusTableCacheEntry(targetRelationId); - /* - * Get the index of the column in the source query that will be utilized - * to repartition the source rows, ensuring colocation with the target - */ - distributedPlan->sourceResultRepartitionColumnIndex = - SourceResultPartitionColumnIndex(mergeQuery, - sourceQuery->targetList, - targetRelation); + + if (IsCitusTableType(targetRelation->relationId, SINGLE_SHARD_DISTRIBUTED)) + { + /* + * if target table is SINGLE_SHARD_DISTRIBUTED let's set this to invalid -1 + * so later in execution phase we don't rely on this value and try to find single shard of target instead. + */ + distributedPlan->sourceResultRepartitionColumnIndex = -1; + } + else + { + /* + * Get the index of the column in the source query that will be utilized + * to repartition the source rows, ensuring colocation with the target + */ + + distributedPlan->sourceResultRepartitionColumnIndex = + SourceResultPartitionColumnIndex(mergeQuery, + sourceQuery->targetList, + targetRelation); + } /* * Make a copy of the source query, since following code scribbles it @@ -270,11 +275,11 @@ CreateNonPushableMergePlan(Oid targetRelationId, uint64 planId, Query *originalQ int cursorOptions = CURSOR_OPT_PARALLEL_OK; PlannedStmt *sourceRowsPlan = pg_plan_query(sourceQueryCopy, NULL, cursorOptions, boundParams); - bool repartitioned = IsRedistributablePlan(sourceRowsPlan->planTree) && - IsSupportedRedistributionTarget(targetRelationId); + bool isRepartitionAllowed = IsRedistributablePlan(sourceRowsPlan->planTree) && + IsSupportedRedistributionTarget(targetRelationId); /* If plan is distributed, no work at the coordinator */ - if (repartitioned) + if (isRepartitionAllowed) { distributedPlan->modifyWithSelectMethod = MODIFY_WITH_SELECT_REPARTITION; } @@ -853,7 +858,7 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte, newRangeTableRef->rtindex = SINGLE_RTE_INDEX; sourceResultsQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL); sourceResultsQuery->targetList = - CreateAllTargetListForRelation(sourceRte->relid, requiredAttributes); + CreateFilteredTargetListForRelation(sourceRte->relid, requiredAttributes); List *restrictionList = GetRestrictInfoListForRelation(sourceRte, plannerRestrictionContext); List *copyRestrictionList = copyObject(restrictionList); @@ -1124,6 +1129,27 @@ DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList, "repartitioning"))); return deferredError; } + + + /* + * If execution has reached this point, it indicates that the query can be delegated to the worker. + * However, before proceeding with this delegation, we need to confirm that the user is utilizing + * the distribution column of the source table in the Insert variable. + * If this is not the case, we should refrain from pushing down the query. + * This is just a deffered error which will be handle by caller. 
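+	 *
+	 * For example (illustrative): with target table t and source table s both
+	 * distributed and colocated on column a, the following MERGE can be pushed
+	 * down because the INSERT places s.a into the target's distribution column:
+	 *
+	 *   MERGE INTO t USING s ON (t.a = s.a)
+	 *     WHEN NOT MATCHED THEN INSERT (a, b) VALUES (s.a, s.b);
+	 *
+	 * whereas INSERT (a, b) VALUES (s.b, s.b) would instead fall back to
+	 * repartitioning.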
+ */ + + Var *insertVar = + FetchAndValidateInsertVarIfExists(targetRelationId, query); + if (insertVar && + !IsDistributionColumnInMergeSource((Expr *) insertVar, query, true)) + { + ereport(DEBUG1, (errmsg( + "MERGE INSERT must use the source table distribution column value for push down to workers. Otherwise, repartitioning will be applied"))); + return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, + "MERGE INSERT must use the source table distribution column value for push down to workers. Otherwise, repartitioning will be applied", + NULL, NULL); + } return NULL; } @@ -1260,13 +1286,6 @@ static int SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList, CitusTableCacheEntry *targetRelation) { - if (IsCitusTableType(targetRelation->relationId, SINGLE_SHARD_DISTRIBUTED)) - { - ereport(ERROR, (errmsg("MERGE operation across distributed schemas " - "or with a row-based distributed table is " - "not yet supported"))); - } - /* Get all the Join conditions from the ON clause */ List *mergeJoinConditionList = WhereClauseList(mergeQuery->jointree); Var *targetColumn = targetRelation->partitionColumn; diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index 4584e7740..db30f4b60 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -190,6 +190,14 @@ PG_FUNCTION_INFO_V1(worker_save_query_explain_analyze); void CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es) { +#if PG_VERSION_NUM >= PG_VERSION_16 + if (es->generic) + { + ereport(ERROR, (errmsg( + "EXPLAIN GENERIC_PLAN is currently not supported for Citus tables"))); + } +#endif + CitusScanState *scanState = (CitusScanState *) node; DistributedPlan *distributedPlan = scanState->distributedPlan; EState *executorState = ScanStateGetExecutorState(scanState); @@ -992,18 +1000,12 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es) appendStringInfo(explainQuery, "EXPLAIN (ANALYZE %s, VERBOSE %s, " "COSTS %s, BUFFERS %s, WAL %s, " -#if PG_VERSION_NUM >= PG_VERSION_16 - "GENERIC_PLAN %s, " -#endif "TIMING %s, SUMMARY %s, FORMAT %s) %s", es->analyze ? "TRUE" : "FALSE", es->verbose ? "TRUE" : "FALSE", es->costs ? "TRUE" : "FALSE", es->buffers ? "TRUE" : "FALSE", es->wal ? "TRUE" : "FALSE", -#if PG_VERSION_NUM >= PG_VERSION_16 - es->generic ? "TRUE" : "FALSE", -#endif es->timing ? "TRUE" : "FALSE", es->summary ? 
"TRUE" : "FALSE", formatStr, diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index 76e38237a..28680deb0 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -1557,9 +1557,10 @@ MasterAggregateMutator(Node *originalNode, MasterAggregateWalkerContext *walkerC } else if (IsA(originalNode, Var)) { - Var *newColumn = copyObject((Var *) originalNode); - newColumn->varno = masterTableId; - newColumn->varattno = walkerContext->columnId; + Var *origColumn = (Var *) originalNode; + Var *newColumn = makeVar(masterTableId, walkerContext->columnId, + origColumn->vartype, origColumn->vartypmod, + origColumn->varcollid, origColumn->varlevelsup); walkerContext->columnId++; newNode = (Node *) newColumn; @@ -4753,22 +4754,35 @@ WorkerLimitCount(Node *limitCount, Node *limitOffset, OrderByLimitReference if (workerLimitNode != NULL && limitOffset != NULL) { Const *workerLimitConst = (Const *) workerLimitNode; - Const *workerOffsetConst = (Const *) limitOffset; - int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); - int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue); - workerLimitCount = workerLimitCount + workerOffsetCount; - workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount); + /* Only update the worker limit if the const is not null.*/ + if (!workerLimitConst->constisnull) + { + Const *workerOffsetConst = (Const *) limitOffset; + int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); + + /* If the offset is null, it defaults to 0 when cast to int64. */ + int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue); + workerLimitCount = workerLimitCount + workerOffsetCount; + workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount); + } } /* display debug message on limit push down */ if (workerLimitNode != NULL) { Const *workerLimitConst = (Const *) workerLimitNode; - int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); + if (!workerLimitConst->constisnull) + { + int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); - ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT, - workerLimitCount))); + ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT, + workerLimitCount))); + } + else + { + ereport(DEBUG1, (errmsg("push down of limit count: ALL"))); + } } return workerLimitNode; diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c index bef91618e..d298b0f46 100644 --- a/src/backend/distributed/planner/query_colocation_checker.c +++ b/src/backend/distributed/planner/query_colocation_checker.c @@ -45,8 +45,6 @@ static RangeTblEntry * AnchorRte(Query *subquery); static List * UnionRelationRestrictionLists(List *firstRelationList, List *secondRelationList); -static List * CreateFilteredTargetListForRelation(Oid relationId, - List *requiredAttributes); static List * CreateDummyTargetList(Oid relationId, List *requiredAttributes); static TargetEntry * CreateTargetEntryForColumn(Form_pg_attribute attributeTuple, Index rteIndex, @@ -378,7 +376,7 @@ CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes) * only the required columns of the given relation. If there is not required * columns then a dummy NULL column is put as the only entry. 
*/ -static List * +List * CreateFilteredTargetListForRelation(Oid relationId, List *requiredAttributes) { Relation relation = relation_open(relationId, AccessShareLock); diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index 056bc9a45..08e6c5573 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -1335,10 +1335,10 @@ CreatePublications(MultiConnection *connection, WorkerNode *worker = FindWorkerNode(connection->hostname, connection->port); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_PUBLICATION, - entry->name, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_PUBLICATION, + entry->name, + worker->groupId, + CLEANUP_ALWAYS); ExecuteCriticalRemoteCommand(connection, DISABLE_DDL_PROPAGATION); ExecuteCriticalRemoteCommand(connection, createPublicationCommand->data); @@ -1435,10 +1435,10 @@ CreateReplicationSlots(MultiConnection *sourceConnection, WorkerNode *worker = FindWorkerNode(sourceConnection->hostname, sourceConnection->port); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_REPLICATION_SLOT, - replicationSlot->name, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_REPLICATION_SLOT, + replicationSlot->name, + worker->groupId, + CLEANUP_ALWAYS); if (!firstReplicationSlot) { @@ -1506,10 +1506,10 @@ CreateSubscriptions(MultiConnection *sourceConnection, quote_identifier(GetUserNameFromId(ownerId, false)) ))); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_USER, - target->subscriptionOwnerName, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_USER, + target->subscriptionOwnerName, + worker->groupId, + CLEANUP_ALWAYS); StringInfo conninfo = makeStringInfo(); appendStringInfo(conninfo, "host='%s' port=%d user='%s' dbname='%s' " @@ -1567,10 +1567,10 @@ CreateSubscriptions(MultiConnection *sourceConnection, pfree(createSubscriptionCommand->data); pfree(createSubscriptionCommand); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SUBSCRIPTION, - target->subscriptionName, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SUBSCRIPTION, + target->subscriptionName, + worker->groupId, + CLEANUP_ALWAYS); ExecuteCriticalRemoteCommand(target->superuserConnection, psprintf( "ALTER SUBSCRIPTION %s OWNER TO %s", diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index ad5a14a25..6d26b802f 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -895,22 +895,13 @@ DecrementExternalClientBackendCounterAtExit(int code, Datum arg) static void CreateRequiredDirectories(void) { - const char *subdirs[] = { - "pg_foreign_file", - "pg_foreign_file/cached", - ("base/" PG_JOB_CACHE_DIR) - }; + const char *subdir = ("base/" PG_JOB_CACHE_DIR); - for (int dirNo = 0; dirNo < lengthof(subdirs); dirNo++) + if (MakePGDirectory(subdir) != 0 && errno != EEXIST) { - int ret = mkdir(subdirs[dirNo], S_IRWXU); - - if (ret != 0 && errno != EEXIST) - { - ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", - subdirs[dirNo]))); - } + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not create directory \"%s\": %m", + subdir))); } } @@ -2899,14 +2890,27 @@ ApplicationNameAssignHook(const char 
*newval, void *extra) DetermineCitusBackendType(newval); /* - * AssignGlobalPID might read from catalog tables to get the the local - * nodeid. But ApplicationNameAssignHook might be called before catalog - * access is available to the backend (such as in early stages of - * authentication). We use StartupCitusBackend to initialize the global pid - * after catalogs are available. After that happens this hook becomes - * responsible to update the global pid on later application_name changes. - * So we set the FinishedStartupCitusBackend flag in StartupCitusBackend to - * indicate when this responsibility handoff has happened. + * We use StartupCitusBackend to initialize the global pid after catalogs + * are available. After that happens this hook becomes responsible to update + * the global pid on later application_name changes. So we set the + * FinishedStartupCitusBackend flag in StartupCitusBackend to indicate when + * this responsibility handoff has happened. + * + * Also note that when application_name changes, we don't actually need to + * try re-assigning the global pid for external client backends and + * background workers because application_name doesn't affect the global + * pid for such backends; note that the !IsExternalClientBackend() check covers + * both types of backends. Plus, + * trying to re-assign the global pid for such backends would unnecessarily + * cause a catalog access when the cached local node id is + * invalidated. However, accessing the catalog tables is dangerous in + * certain situations, such as when we're not in a transaction block. And for + * the other types of backends, i.e., the Citus internal backends, we need + * to re-assign the global pid when the application_name changes because for + * such backends we simply extract the global pid inherited from the + * originating backend from the application_name (which the originating + * backend specified when opening that connection), and this doesn't require + * catalog access. * * Another solution to the catalog table acccess problem would be to update * global pid lazily, like we do for HideShards. But that's not possible @@ -2916,7 +2920,7 @@ ApplicationNameAssignHook(const char *newval, void *extra) * as reasonably possible, which is also why we extract global pids in the * AuthHook already (extracting doesn't require catalog access).
*/ - if (FinishedStartupCitusBackend) + if (FinishedStartupCitusBackend && !IsExternalClientBackend()) { AssignGlobalPID(newval); } @@ -2938,6 +2942,7 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source) #if defined(ENABLE_GSS) && defined(ENABLE_SSPI) "gsslib", #endif + "host", "keepalives", "keepalives_count", "keepalives_idle", diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql index bc99060f1..1bec0f429 100644 --- a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql +++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql @@ -7,6 +7,8 @@ #include "udfs/start_management_transaction/12.2-1.sql" #include "udfs/execute_command_on_remote_nodes_as_user/12.2-1.sql" #include "udfs/mark_object_distributed/12.2-1.sql" +DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid, oid, int); +#include "udfs/citus_unmark_object_distributed/12.2-1.sql" #include "udfs/commit_management_command_2pc/12.2-1.sql" ALTER TABLE pg_catalog.pg_dist_transaction ADD COLUMN outer_xid xid8; @@ -29,3 +31,27 @@ REVOKE ALL ON FUNCTION citus_internal.start_management_transaction FROM PUBLIC; #include "udfs/citus_internal_add_colocation_metadata/12.2-1.sql" #include "udfs/citus_internal_add_object_metadata/12.2-1.sql" #include "udfs/citus_internal_add_partition_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_shard_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_tenant_schema/12.2-1.sql" +#include "udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql" +#include "udfs/citus_internal_delete_colocation_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_partition_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_shard_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_tenant_schema/12.2-1.sql" +#include "udfs/citus_internal_local_blocked_processes/12.2-1.sql" +#include "udfs/citus_internal_global_blocked_processes/12.2-1.sql" +#include "udfs/citus_blocking_pids/12.2-1.sql" +#include "udfs/citus_isolation_test_session_is_blocked/12.2-1.sql" +DROP VIEW IF EXISTS pg_catalog.citus_lock_waits; +#include "udfs/citus_lock_waits/12.2-1.sql" + +#include "udfs/citus_internal_mark_node_not_synced/12.2-1.sql" +#include "udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql" +#include "udfs/citus_drop_trigger/12.2-1.sql" +#include "udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql" +#include "udfs/citus_internal_update_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_update_relation_colocation/12.2-1.sql" +#include "udfs/repl_origin_helper/12.2-1.sql" +#include "udfs/citus_finish_pg_upgrade/12.2-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql index 33df58b87..099bf8d87 100644 --- a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql @@ -1,6 +1,6 @@ -- citus--12.2-1--12.1-1 -DROP FUNCTION pg_catalog.citus_internal_database_command(text); +DROP FUNCTION citus_internal.database_command(text); DROP FUNCTION citus_internal.acquire_citus_advisory_object_class_lock(int, cstring); #include "../udfs/citus_add_rebalance_strategy/10.1-1.sql" @@ -18,6 +18,9 @@ DROP FUNCTION citus_internal.mark_object_distributed( classId Oid, objectName text, 
objectId Oid, connectionUser text ); +DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid,oid,int,boolean); +#include "../udfs/citus_unmark_object_distributed/10.0-1.sql" + DROP FUNCTION citus_internal.commit_management_command_2pc(); ALTER TABLE pg_catalog.pg_dist_transaction DROP COLUMN outer_xid; @@ -26,4 +29,29 @@ REVOKE USAGE ON SCHEMA citus_internal FROM PUBLIC; DROP FUNCTION citus_internal.add_colocation_metadata(int, int, int, regtype, oid); DROP FUNCTION citus_internal.add_object_metadata(text, text[], text[], integer, integer, boolean); DROP FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char"); +DROP FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint); +DROP FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text); +DROP FUNCTION citus_internal.add_tenant_schema(oid, integer); +DROP FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock); +DROP FUNCTION citus_internal.delete_colocation_metadata(int); +DROP FUNCTION citus_internal.delete_partition_metadata(regclass); +DROP FUNCTION citus_internal.delete_placement_metadata(bigint); +DROP FUNCTION citus_internal.delete_shard_metadata(bigint); +DROP FUNCTION citus_internal.delete_tenant_schema(oid); +DROP FUNCTION citus_internal.local_blocked_processes(); +#include "../udfs/citus_blocking_pids/11.0-1.sql" +#include "../udfs/citus_isolation_test_session_is_blocked/11.1-1.sql" +DROP VIEW IF EXISTS pg_catalog.citus_lock_waits; +#include "../udfs/citus_lock_waits/11.0-1.sql" +DROP FUNCTION citus_internal.global_blocked_processes(); +DROP FUNCTION citus_internal.mark_node_not_synced(int, int); +DROP FUNCTION citus_internal.unregister_tenant_schema_globally(oid, text); +#include "../udfs/citus_drop_trigger/12.0-1.sql" +DROP FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean); +DROP FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer); +DROP FUNCTION citus_internal.update_relation_colocation(oid, int); +DROP FUNCTION citus_internal.start_replication_origin_tracking(); +DROP FUNCTION citus_internal.stop_replication_origin_tracking(); +DROP FUNCTION citus_internal.is_replication_origin_tracking_active(); +#include "../udfs/citus_finish_pg_upgrade/12.1-1.sql" diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql new file mode 100644 index 000000000..4e747ff4f --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql @@ -0,0 +1,34 @@ +DROP FUNCTION pg_catalog.citus_blocking_pids; +CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer) +RETURNS int4[] AS $$ + DECLARE + mLocalBlockingPids int4[]; + mRemoteBlockingPids int4[]; + mLocalGlobalPid int8; + BEGIN + SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids; + + IF (array_length(mLocalBlockingPids, 1) > 0) THEN + RETURN mLocalBlockingPids; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. 
+ SELECT global_pid INTO mLocalGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + + SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM ( + WITH activeTransactions AS ( + SELECT global_pid FROM get_all_active_transactions() + ), blockingTransactions AS ( + SELECT blocking_global_pid FROM citus_internal.global_blocked_processes() + WHERE waiting_global_pid = mLocalGlobalPid + ) + SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions + WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid + ) AS sub; + + RETURN mRemoteBlockingPids; + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql index c7e607c1c..4e747ff4f 100644 --- a/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql @@ -20,7 +20,7 @@ RETURNS int4[] AS $$ WITH activeTransactions AS ( SELECT global_pid FROM get_all_active_transactions() ), blockingTransactions AS ( - SELECT blocking_global_pid FROM citus_internal_global_blocked_processes() + SELECT blocking_global_pid FROM citus_internal.global_blocked_processes() WHERE waiting_global_pid = mLocalGlobalPid ) SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql new file mode 100644 index 000000000..6e4c52209 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql @@ -0,0 +1,68 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger() + RETURNS event_trigger + LANGUAGE plpgsql + SET search_path = pg_catalog + AS $cdbdt$ +DECLARE + constraint_event_count INTEGER; + v_obj record; + dropped_table_is_a_partition boolean := false; +BEGIN + FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() + WHERE object_type IN ('table', 'foreign table') + LOOP + -- first drop the table and metadata on the workers + -- then drop all the shards on the workers + -- finally remove the pg_dist_partition entry on the coordinator + PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name); + + -- If both original and normal values are false, the dropped table was a partition + -- that was dropped as a result of its parent being dropped + -- NOTE: the other way around is not true: + -- the table being a partition doesn't imply both original and normal values are false + SELECT (v_obj.original = false AND v_obj.normal = false) INTO dropped_table_is_a_partition; + + -- The partition's shards will be dropped when dropping the parent's shards, so we can skip: + -- i.e. we call citus_drop_all_shards with drop_shards_metadata_only parameter set to true + IF dropped_table_is_a_partition + THEN + PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := true); + ELSE + PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false); + END IF; + + PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name); + END LOOP; + + FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + -- Remove entries from pg_catalog.pg_dist_schema for all dropped tenant schemas. 
+ -- Also delete the corresponding colocation group from pg_catalog.pg_dist_colocation. + -- + -- Although normally we automatically delete the colocation groups when they become empty, + -- we don't do so for the colocation groups that are created for tenant schemas. For this + -- reason, here we need to delete the colocation group when the tenant schema is dropped. + IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid) + THEN + PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); + END IF; + + -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects + PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid); + END LOOP; + + SELECT COUNT(*) INTO constraint_event_count + FROM pg_event_trigger_dropped_objects() + WHERE object_type IN ('table constraint'); + + IF constraint_event_count > 0 + THEN + -- Tell utility hook that a table constraint is dropped so we might + -- need to undistribute some of the citus local tables that are not + -- connected to any reference tables. + PERFORM notify_constraint_dropped(); + END IF; +END; +$cdbdt$; +COMMENT ON FUNCTION pg_catalog.citus_drop_trigger() + IS 'perform checks and actions at the end of DROP actions'; diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql index 312099aeb..6e4c52209 100644 --- a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql @@ -44,7 +44,7 @@ BEGIN -- reason, here we need to delete the colocation group when the tenant schema is dropped. IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid) THEN - PERFORM pg_catalog.citus_internal_unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); + PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); END IF; -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql index 7b7d357ff..816341c5d 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql @@ -96,7 +96,7 @@ END; IF all_nodes_can_connect_to_each_other != True THEN RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' - 'nodes are up and runnnig. Also, make sure that all nodes can connect ' + 'nodes are up and running. Also, make sure that all nodes can connect ' 'to each other. Use SELECT * FROM citus_check_cluster_node_health(); ' 'to check the cluster health'; ELSE diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql index 2b4bb17f6..4a253b151 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql @@ -96,7 +96,7 @@ END; IF all_nodes_can_connect_to_each_other != True THEN RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' - 'nodes are up and runnnig. 
Also, make sure that all nodes can connect ' + 'nodes are up and running. Also, make sure that all nodes can connect ' 'to each other. Use SELECT * FROM citus_check_cluster_node_health(); ' 'to check the cluster health'; ELSE diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql index fae94a04a..d6ba4a2b8 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql @@ -96,7 +96,7 @@ END; IF all_nodes_can_connect_to_each_other != True THEN RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' - 'nodes are up and runnnig. Also, make sure that all nodes can connect ' + 'nodes are up and running. Also, make sure that all nodes can connect ' 'to each other. Use SELECT * FROM citus_check_cluster_node_health(); ' 'to check the cluster health'; ELSE diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql index fae94a04a..d6ba4a2b8 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql @@ -96,7 +96,7 @@ END; IF all_nodes_can_connect_to_each_other != True THEN RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' - 'nodes are up and runnnig. Also, make sure that all nodes can connect ' + 'nodes are up and running. Also, make sure that all nodes can connect ' 'to each other. Use SELECT * FROM citus_check_cluster_node_health(); ' 'to check the cluster health'; ELSE diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/12.2-1.sql new file mode 100644 index 000000000..4d3a17bd4 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/12.2-1.sql @@ -0,0 +1,227 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade() + RETURNS void + LANGUAGE plpgsql + SET search_path = pg_catalog + AS $cppu$ +DECLARE + table_name regclass; + command text; + trigger_name text; +BEGIN + + + IF substring(current_Setting('server_version'), '\d+')::int >= 14 THEN + EXECUTE $cmd$ + -- disable propagation to prevent EnsureCoordinator errors + -- the aggregate created here does not depend on Citus extension (yet) + -- since we add the dependency with the next command + SET citus.enable_ddl_propagation TO OFF; + CREATE AGGREGATE array_cat_agg(anycompatiblearray) (SFUNC = array_cat, STYPE = anycompatiblearray); + COMMENT ON AGGREGATE array_cat_agg(anycompatiblearray) + IS 'concatenate input arrays into a single array'; + RESET citus.enable_ddl_propagation; + $cmd$; + ELSE + EXECUTE $cmd$ + SET citus.enable_ddl_propagation TO OFF; + CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray); + COMMENT ON AGGREGATE array_cat_agg(anyarray) + IS 'concatenate input arrays into a single array'; + RESET citus.enable_ddl_propagation; + $cmd$; + END IF; + + -- + -- Citus creates the array_cat_agg but because of a compatibility + -- issue between pg13-pg14, we drop and create it during upgrade. + -- And as Citus creates it, there needs to be a dependency to the + -- Citus extension, so we create that dependency here. 
+ -- We are not using: + -- ALTER EXTENSION citus DROP/CREATE AGGREGATE array_cat_agg + -- because we don't have an easy way to check if the aggregate + -- exists with anyarray type or anycompatiblearray type. + + INSERT INTO pg_depend + SELECT + 'pg_proc'::regclass::oid as classid, + (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'e' as deptype; + + -- PG16 has its own any_value, so only create it pre PG16. + -- We can remove this part when we drop support for PG16 + IF substring(current_Setting('server_version'), '\d+')::int < 16 THEN + EXECUTE $cmd$ + -- disable propagation to prevent EnsureCoordinator errors + -- the aggregate created here does not depend on Citus extension (yet) + -- since we add the dependency with the next command + SET citus.enable_ddl_propagation TO OFF; + CREATE OR REPLACE FUNCTION pg_catalog.any_value_agg ( anyelement, anyelement ) + RETURNS anyelement AS $$ + SELECT CASE WHEN $1 IS NULL THEN $2 ELSE $1 END; + $$ LANGUAGE SQL STABLE; + + CREATE AGGREGATE pg_catalog.any_value ( + sfunc = pg_catalog.any_value_agg, + combinefunc = pg_catalog.any_value_agg, + basetype = anyelement, + stype = anyelement + ); + COMMENT ON AGGREGATE pg_catalog.any_value(anyelement) IS + 'Returns the value of any row in the group. It is mostly useful when you know there will be only 1 element.'; + RESET citus.enable_ddl_propagation; + -- + -- Citus creates the any_value aggregate but because of a compatibility + -- issue between pg15-pg16 -- any_value is created in PG16, we drop + -- and create it during upgrade IF upgraded version is less than 16. + -- And as Citus creates it, there needs to be a dependency to the + -- Citus extension, so we create that dependency here. + + INSERT INTO pg_depend + SELECT + 'pg_proc'::regclass::oid as classid, + (SELECT oid FROM pg_proc WHERE proname = 'any_value_agg') as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'e' as deptype; + + INSERT INTO pg_depend + SELECT + 'pg_proc'::regclass::oid as classid, + (SELECT oid FROM pg_proc WHERE proname = 'any_value') as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'e' as deptype; + $cmd$; + END IF; + + -- + -- restore citus catalog tables + -- + INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition; + + -- if we are upgrading from PG14/PG15 to PG16+, + -- we need to regenerate the partkeys because they will include varnullingrels as well.
+ UPDATE pg_catalog.pg_dist_partition + SET partkey = column_name_to_column(pg_dist_partkeys_pre_16_upgrade.logicalrelid, col_name) + FROM public.pg_dist_partkeys_pre_16_upgrade + WHERE pg_dist_partkeys_pre_16_upgrade.logicalrelid = pg_dist_partition.logicalrelid; + DROP TABLE public.pg_dist_partkeys_pre_16_upgrade; + + INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard; + INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement; + INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata; + INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node; + INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group; + INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction; + INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation; + INSERT INTO pg_catalog.pg_dist_cleanup SELECT * FROM public.pg_dist_cleanup; + INSERT INTO pg_catalog.pg_dist_schema SELECT schemaname::regnamespace, colocationid FROM public.pg_dist_schema; + -- enterprise catalog tables + INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo; + INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo; + + -- Temporarily disable trigger to check for validity of functions while + -- inserting. The current contents of the table might be invalid if one of + -- the functions was removed by the user without also removing the + -- rebalance strategy. Obviously that's not great, but it should be no + -- reason to fail the upgrade. + ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; + INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT + name, + default_strategy, + shard_cost_function::regprocedure::regproc, + node_capacity_function::regprocedure::regproc, + shard_allowed_on_node_function::regprocedure::regproc, + default_threshold, + minimum_threshold, + improvement_threshold + FROM public.pg_dist_rebalance_strategy; + ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; + + -- + -- drop backup tables + -- + DROP TABLE public.pg_dist_authinfo; + DROP TABLE public.pg_dist_colocation; + DROP TABLE public.pg_dist_local_group; + DROP TABLE public.pg_dist_node; + DROP TABLE public.pg_dist_node_metadata; + DROP TABLE public.pg_dist_partition; + DROP TABLE public.pg_dist_placement; + DROP TABLE public.pg_dist_poolinfo; + DROP TABLE public.pg_dist_shard; + DROP TABLE public.pg_dist_transaction; + DROP TABLE public.pg_dist_rebalance_strategy; + DROP TABLE public.pg_dist_cleanup; + DROP TABLE public.pg_dist_schema; + -- + -- reset sequences + -- + PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false); + PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false); + PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false); + PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false); + PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false); + PERFORM setval('pg_catalog.pg_dist_operationid_seq', (SELECT MAX(operation_id)+1 AS max_operation_id FROM pg_dist_cleanup), false); + PERFORM 
setval('pg_catalog.pg_dist_cleanup_recordid_seq', (SELECT MAX(record_id)+1 AS max_record_id FROM pg_dist_cleanup), false); + PERFORM setval('pg_catalog.pg_dist_clock_logical_seq', (SELECT last_value FROM public.pg_dist_clock_logical_seq), false); + DROP TABLE public.pg_dist_clock_logical_seq; + + + + -- + -- register triggers + -- + FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition JOIN pg_class ON (logicalrelid = oid) WHERE relkind <> 'f' + LOOP + trigger_name := 'truncate_trigger_' || table_name::oid; + command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()'; + EXECUTE command; + command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name); + EXECUTE command; + END LOOP; + + -- + -- set dependencies + -- + INSERT INTO pg_depend + SELECT + 'pg_class'::regclass::oid as classid, + p.logicalrelid::regclass::oid as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'n' as deptype + FROM pg_catalog.pg_dist_partition p; + + -- set dependencies for columnar table access method + PERFORM columnar_internal.columnar_ensure_am_depends_catalog(); + + -- restore pg_dist_object from the stable identifiers + TRUNCATE pg_catalog.pg_dist_object; + INSERT INTO pg_catalog.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid) + SELECT + address.classid, + address.objid, + address.objsubid, + naming.distribution_argument_index, + naming.colocationid + FROM + public.pg_dist_object naming, + pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address; + + DROP TABLE public.pg_dist_object; +END; +$cppu$; + +COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade() + IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade'; diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql index 766e86a2e..4d3a17bd4 100644 --- a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql @@ -128,6 +128,12 @@ BEGIN INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo; INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo; + -- Temporarily disable trigger to check for validity of functions while + -- inserting. The current contents of the table might be invalid if one of + -- the functions was removed by the user without also removing the + -- rebalance strategy. Obviously that's not great, but it should be no + -- reason to fail the upgrade. 
+ ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT name, default_strategy, @@ -138,6 +144,7 @@ BEGIN minimum_threshold, improvement_threshold FROM public.pg_dist_rebalance_strategy; + ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; -- -- drop backup tables diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql new file mode 100644 index 000000000..339fc2948 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql @@ -0,0 +1,36 @@ +-- create a new function, without shardstate +CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata( + shard_id bigint, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + +-- create a new function, without shardstate +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( + shard_id bigint, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + +-- replace the old one so it would call the old C function with shard_state +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( + shard_id bigint, shard_state integer, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata_legacy$$; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, integer, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql index 9d1dd4ffa..339fc2948 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql @@ -1,3 +1,15 @@ +-- create a new function, without shardstate +CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata( + shard_id bigint, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + -- create a new function, without shardstate CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( shard_id bigint, diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql new file mode 100644 index 000000000..82c29f054 --- /dev/null +++ 
b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql @@ -0,0 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql index 7411d9179..82c29f054 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql @@ -1,3 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( relation_id regclass, shard_id bigint, storage_type "char", shard_min_value text, diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql new file mode 100644 index 000000000..028848f90 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS + 'insert given tenant schema into pg_dist_schema with given colocation id'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_tenant_schema(Oid, int) IS + 'insert given tenant schema into pg_dist_schema with given colocation id'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql index 56b3cae84..028848f90 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql @@ -1,3 +1,12 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS + 'insert given tenant schema into 
pg_dist_schema with given colocation id'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql new file mode 100644 index 000000000..36d37a9e6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + RETURNS void + LANGUAGE C STABLE PARALLEL SAFE STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$; +COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster'; + +REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) + RETURNS void + LANGUAGE C STABLE PARALLEL SAFE STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) + IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster'; + +REVOKE ALL ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql index 240f7a9b7..36d37a9e6 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql @@ -1,3 +1,12 @@ +CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + RETURNS void + LANGUAGE C STABLE PARALLEL SAFE STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$; +COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster'; + +REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) RETURNS void LANGUAGE C STABLE PARALLEL SAFE STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql index 9f6d873cc..2c6e916c0 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql @@ -1,10 +1,10 @@ -- --- citus_internal_database_command run given database command without transaction block restriction. +-- citus_internal.database_command run given database command without transaction block restriction. 
-CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text) +CREATE OR REPLACE FUNCTION citus_internal.database_command(command text) RETURNS void LANGUAGE C VOLATILE AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; -COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS +COMMENT ON FUNCTION citus_internal.database_command(text) IS 'run a database command without transaction block restrictions'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql index 9f6d873cc..2c6e916c0 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql @@ -1,10 +1,10 @@ -- --- citus_internal_database_command run given database command without transaction block restriction. +-- citus_internal.database_command run given database command without transaction block restriction. -CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text) +CREATE OR REPLACE FUNCTION citus_internal.database_command(command text) RETURNS void LANGUAGE C VOLATILE AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; -COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS +COMMENT ON FUNCTION citus_internal.database_command(text) IS 'run a database command without transaction block restrictions'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql new file mode 100644 index 000000000..cb56a25cd --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata( + colocation_id int) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$; + +COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS + 'deletes a co-location group from pg_dist_colocation'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata( + colocation_id int) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_colocation_metadata(int) IS + 'deletes a co-location group from pg_dist_colocation'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql index d4c3f1be9..cb56a25cd 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata( + colocation_id int) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$; + +COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS + 'deletes a co-location group from pg_dist_colocation'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata( colocation_id int) RETURNS void diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql new file mode 
100644 index 000000000..693815abf --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS + 'Deletes a row from pg_dist_partition with table ownership checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_partition_metadata(regclass) IS + 'Deletes a row from pg_dist_partition with table ownership checks'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql index c7cb5455d..693815abf 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS + 'Deletes a row from pg_dist_partition with table ownership checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql new file mode 100644 index 000000000..f78c9a08e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata( + placement_id bigint) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', +$$citus_internal_delete_placement_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint) + IS 'Delete placement with given id from pg_dist_placement metadata table.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata( + placement_id bigint) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', +$$citus_internal_delete_placement_metadata$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_placement_metadata(bigint) + IS 'Delete placement with given id from pg_dist_placement metadata table.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql index 5af65f0be..f78c9a08e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata( + placement_id bigint) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', +$$citus_internal_delete_placement_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint) + IS 'Delete placement with given id from pg_dist_placement metadata table.'; + CREATE 
OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata( placement_id bigint) RETURNS void diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql new file mode 100644 index 000000000..bcd121b0d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS + 'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_shard_metadata(bigint) IS + 'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql index 7bfd86bdd..bcd121b0d 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS + 'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql new file mode 100644 index 000000000..2c36108b4 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.delete_tenant_schema(Oid) IS + 'delete given tenant schema from pg_dist_schema'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_tenant_schema(Oid) IS + 'delete given tenant schema from pg_dist_schema'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql index 4a2bf0067..2c36108b4 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql @@ -1,3 +1,12 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.delete_tenant_schema(Oid) IS + 'delete given 
tenant schema from pg_dist_schema'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql new file mode 100644 index 000000000..da8e98c20 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql @@ -0,0 +1,35 @@ +CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql index 510cdf93d..da8e98c20 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql @@ -1,3 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( OUT waiting_global_pid int8, OUT waiting_pid int4, diff --git a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql new file mode 100644 index 000000000..b27f16d53 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql @@ -0,0 +1,35 @@ +CREATE OR REPLACE FUNCTION 
citus_internal.local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql index 3157a9aad..b27f16d53 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql @@ -1,3 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( OUT waiting_global_pid int8, OUT waiting_pid int4, diff --git a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql new file mode 100644 index 000000000..8635b9699 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql @@ -0,0 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int) + IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION 
citus_internal_mark_node_not_synced(int, int) + IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql index 0d90c8f1a..8635b9699 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int) + IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int) RETURNS VOID LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql new file mode 100644 index 000000000..b07eb425d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql @@ -0,0 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$; +COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql index 1863f1ddf..b07eb425d 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql @@ -1,3 +1,11 @@ +CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$; +COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql new file mode 100644 index 000000000..cab960544 --- /dev/null +++ 
b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql @@ -0,0 +1,23 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$; +COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql index bcd05d8d0..cab960544 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql @@ -1,3 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$; +COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata( relation_id oid, replication_model "char", diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql new file mode 100644 index 000000000..b7c473647 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS + 'Updates into pg_dist_placement with user checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_update_placement_metadata(bigint, integer, integer) IS + 'Updates into pg_dist_placement with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql index 7cb717740..b7c473647 100644 
--- a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS + 'Updates into pg_dist_placement with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata( shard_id bigint, source_group_id integer, target_group_id integer) diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql new file mode 100644 index 000000000..226689529 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$; +COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS + 'Updates colocationId field of pg_dist_partition for the relation_id'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_update_relation_colocation(oid, int) IS + 'Updates colocationId field of pg_dist_partition for the relation_id'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql index a7f2ec1c6..226689529 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$; +COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS + 'Updates colocationId field of pg_dist_partition for the relation_id'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql new file mode 100644 index 000000000..6f494fa74 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql @@ -0,0 +1,45 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]) +RETURNS boolean AS $$ + DECLARE + mBlockedGlobalPid int8; + workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id'); + coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id'); + BEGIN + IF 
pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN + RETURN true; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. + -- Note that worker process may be blocked or waiting for a lock. So we need to + -- get transaction number for both of them. Following IF provides the transaction + -- number when the worker process waiting for other session. + IF EXISTS (SELECT 1 FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN + SELECT global_pid INTO mBlockedGlobalPid FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId; + ELSE + -- Check whether transactions initiated from the coordinator get locked + SELECT global_pid INTO mBlockedGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + END IF; + + -- We convert the blocking_global_pid to a regular pid and only look at + -- blocks caused by the interesting pids, or the workerProcessPid. If we + -- don't do that we might find unrelated blocks caused by some random + -- other processes that are not involved in this isolation test. Because we + -- run our isolation tests on a single physical machine, the PID part of + -- the GPID is known to be unique within the whole cluster. + RETURN EXISTS ( + SELECT 1 FROM citus_internal.global_blocked_processes() + WHERE waiting_global_pid = mBlockedGlobalPid + AND ( + citus_pid_for_gpid(blocking_global_pid) in ( + select * from unnest(pInterestingPids) + ) + OR citus_pid_for_gpid(blocking_global_pid) = workerProcessId + ) + ); + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_isolation_test_session_is_blocked(integer,integer[]) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql index ff0983910..6f494fa74 100644 --- a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql @@ -30,7 +30,7 @@ RETURNS boolean AS $$ -- run our isolation tests on a single physical machine, the PID part of -- the GPID is known to be unique within the whole cluster. 
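As a rough interactive sanity check of the wait-graph plumbing the helper above builds on, one might run something like the following on the coordinator of a running multi-node Citus cluster (a sketch, assuming superuser access; the column names are the OUT parameters declared for citus_internal.global_blocked_processes above):

SELECT waiting_global_pid,
       blocking_global_pid,
       citus_pid_for_gpid(blocking_global_pid) AS blocking_pid,
       blocking_transaction_waiting
FROM citus_internal.global_blocked_processes();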
RETURN EXISTS ( - SELECT 1 FROM citus_internal_global_blocked_processes() + SELECT 1 FROM citus_internal.global_blocked_processes() WHERE waiting_global_pid = mBlockedGlobalPid AND ( citus_pid_for_gpid(blocking_global_pid) in ( diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql new file mode 100644 index 000000000..880306b99 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql @@ -0,0 +1,47 @@ +SET search_path = 'pg_catalog'; + +CREATE VIEW citus.citus_lock_waits AS +WITH +unique_global_wait_edges_with_calculated_gpids AS ( +SELECT + -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL + -- also for legacy reasons citus_internal.global_blocked_processes() returns groupId, we replace that with nodeIds + case WHEN waiting_global_pid !=0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid, + case WHEN blocking_global_pid !=0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid, + + -- citus_internal.global_blocked_processes returns groupId, we replace it here with actual + -- nodeId to be consisten with the other views + get_nodeid_for_groupid(blocking_node_id) as blocking_node_id, + get_nodeid_for_groupid(waiting_node_id) as waiting_node_id, + + blocking_transaction_waiting + + FROM citus_internal.global_blocked_processes() +), +unique_global_wait_edges AS +( + SELECT DISTINCT ON(waiting_global_pid, blocking_global_pid) * FROM unique_global_wait_edges_with_calculated_gpids +), +citus_dist_stat_activity_with_calculated_gpids AS +( + -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL + SELECT CASE WHEN global_pid != 0 THEN global_pid ELSE citus_calculate_gpid(nodeid, pid) END global_pid, nodeid, pid, query FROM citus_dist_stat_activity +) +SELECT + waiting.global_pid as waiting_gpid, + blocking.global_pid as blocking_gpid, + waiting.query AS blocked_statement, + blocking.query AS current_statement_in_blocking_process, + waiting.nodeid AS waiting_nodeid, + blocking.nodeid AS blocking_nodeid +FROM + unique_global_wait_edges + JOIN + citus_dist_stat_activity_with_calculated_gpids waiting ON (unique_global_wait_edges.waiting_global_pid = waiting.global_pid) + JOIN + citus_dist_stat_activity_with_calculated_gpids blocking ON (unique_global_wait_edges.blocking_global_pid = blocking.global_pid); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql index b3de12632..880306b99 100644 --- a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql @@ -5,18 +5,18 @@ WITH unique_global_wait_edges_with_calculated_gpids AS ( SELECT -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL - -- also for legacy reasons citus_internal_global_blocked_processes() returns groupId, we replace that with nodeIds + -- also for legacy reasons citus_internal.global_blocked_processes() returns groupId, we replace that with nodeIds case WHEN waiting_global_pid !=0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid, case WHEN 
blocking_global_pid !=0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid, - -- citus_internal_global_blocked_processes returns groupId, we replace it here with actual + -- citus_internal.global_blocked_processes returns groupId, we replace it here with actual -- nodeId to be consisten with the other views get_nodeid_for_groupid(blocking_node_id) as blocking_node_id, get_nodeid_for_groupid(waiting_node_id) as waiting_node_id, blocking_transaction_waiting - FROM citus_internal_global_blocked_processes() + FROM citus_internal.global_blocked_processes() ), unique_global_wait_edges AS ( diff --git a/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql new file mode 100644 index 000000000..3c1b1bdec --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql @@ -0,0 +1,7 @@ +CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean DEFAULT true) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_unmark_object_distributed$$; +COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean) + IS 'Removes an object from citus.pg_dist_object after deletion. If checkobjectexistence is true, object existence check performed.' + 'Otherwise, object existence check is skipped.'; diff --git a/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql index 3f60c60c3..3c1b1bdec 100644 --- a/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql @@ -1,6 +1,7 @@ -CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int) +CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean DEFAULT true) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$citus_unmark_object_distributed$$; -COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int) - IS 'remove an object address from citus.pg_dist_object once the object has been deleted'; +COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean) + IS 'Removes an object from citus.pg_dist_object after deletion. If checkobjectexistence is true, object existence check performed.' 
+ 'Otherwise, object existence check is skipped.'; diff --git a/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql b/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql new file mode 100644 index 000000000..8c6d175d0 --- /dev/null +++ b/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql @@ -0,0 +1,41 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; diff --git a/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql b/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql index 5fe5a3bb9..8c6d175d0 100644 --- a/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql +++ b/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql @@ -1,3 +1,24 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION 
citus_internal.stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/test/metadata_sync.c b/src/backend/distributed/test/metadata_sync.c index dec20c772..ce025cff9 100644 --- a/src/backend/distributed/test/metadata_sync.c +++ b/src/backend/distributed/test/metadata_sync.c @@ -50,6 +50,13 @@ activate_node_snapshot(PG_FUNCTION_ARGS) * so we are using first primary worker node just for test purposes. */ WorkerNode *dummyWorkerNode = GetFirstPrimaryWorkerNode(); + if (dummyWorkerNode == NULL) + { + ereport(ERROR, (errmsg("no worker nodes found"), + errdetail("Function activate_node_snapshot is meant to be " + "used when running tests on a multi-node cluster " + "with workers."))); + } /* * Create MetadataSyncContext which is used throughout nodes' activation. diff --git a/src/backend/distributed/test/run_from_same_connection.c b/src/backend/distributed/test/run_from_same_connection.c index 52b2e0b18..d22ee4428 100644 --- a/src/backend/distributed/test/run_from_same_connection.c +++ b/src/backend/distributed/test/run_from_same_connection.c @@ -190,6 +190,9 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS) /* * override_backend_data_gpid is a wrapper around SetBackendDataGpid(). + * It also sets distributedCommandOriginator to true, since the only caller of + * this function actually wants this backend to be treated as a distributed + * command originator with the given global pid.
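A minimal smoke test one might run from psql against the relocated replication-origin helpers defined above (a sketch, assuming a session with sufficient privileges; the expected results follow from the function comments rather than from this patch):

SELECT citus_internal.start_replication_origin_tracking();
SELECT citus_internal.is_replication_origin_tracking_active();  -- expected: t
SELECT citus_internal.stop_replication_origin_tracking();
SELECT citus_internal.is_replication_origin_tracking_active();  -- expected: f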
*/ Datum override_backend_data_gpid(PG_FUNCTION_ARGS) @@ -199,6 +202,7 @@ override_backend_data_gpid(PG_FUNCTION_ARGS) uint64 gpid = PG_GETARG_INT64(0); SetBackendDataGlobalPID(gpid); + SetBackendDataDistributedCommandOriginator(true); PG_RETURN_VOID(); } diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index 5f868f548..9b6e7d122 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -395,7 +395,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto bool showCurrentBackendDetails = showAllBackends; BackendData *currentBackend = &backendManagementShmemData->backends[backendIndex]; - PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex]; + PGPROC *currentProc = GetPGProcByNumber(backendIndex); /* to work on data after releasing g spinlock to protect against errors */ uint64 transactionNumber = 0; @@ -420,7 +420,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto } Oid databaseId = currentBackend->databaseId; - int backendPid = ProcGlobal->allProcs[backendIndex].pid; + int backendPid = GetPGProcByNumber(backendIndex)->pid; /* * We prefer to use worker_query instead of distributedCommandOriginator in @@ -855,6 +855,16 @@ GetCurrentDistributedTransactionId(void) void AssignDistributedTransactionId(void) { + /* + * MyBackendData should always be available. However, we observed some + * crashes where certain hooks were not executed. + * Bug 3697586: Server crashes when assigning distributed transaction + */ + if (!MyBackendData) + { + ereport(ERROR, (errmsg("backend is not ready for distributed transactions"))); + } + pg_atomic_uint64 *transactionNumberSequence = &backendManagementShmemData->nextTransactionNumber; @@ -964,6 +974,23 @@ SetBackendDataGlobalPID(uint64 gpid) } +/* + * SetBackendDataDistributedCommandOriginator sets the distributedCommandOriginator + * field on MyBackendData. + */ +void +SetBackendDataDistributedCommandOriginator(bool distributedCommandOriginator) +{ + if (!MyBackendData) + { + return; + } + SpinLockAcquire(&MyBackendData->mutex); + MyBackendData->distributedCommandOriginator = distributedCommandOriginator; + SpinLockRelease(&MyBackendData->mutex); +} + + /* * GetGlobalPID returns the global process id of the current backend. 
*/ @@ -1279,7 +1306,7 @@ ActiveDistributedTransactionNumbers(void) /* build list of starting procs */ for (int curBackend = 0; curBackend < MaxBackends; curBackend++) { - PGPROC *currentProc = &ProcGlobal->allProcs[curBackend]; + PGPROC *currentProc = GetPGProcByNumber(curBackend); BackendData currentBackendData; if (currentProc->pid == 0) diff --git a/src/backend/distributed/transaction/distributed_deadlock_detection.c b/src/backend/distributed/transaction/distributed_deadlock_detection.c index 27bb48ee3..5e8060a4f 100644 --- a/src/backend/distributed/transaction/distributed_deadlock_detection.c +++ b/src/backend/distributed/transaction/distributed_deadlock_detection.c @@ -375,7 +375,7 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode) for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex) { - PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex]; + PGPROC *currentProc = GetPGProcByNumber(backendIndex); BackendData currentBackendData; /* we're not interested in processes that are not active or waiting on a lock */ diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c index 82f936243..695df2bf4 100644 --- a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -192,7 +192,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) "waiting_node_id, waiting_transaction_num, waiting_transaction_stamp, " "blocking_global_pid,blocking_pid, blocking_node_id, " "blocking_transaction_num, blocking_transaction_stamp, blocking_transaction_waiting " - "FROM citus_internal_local_blocked_processes()"); + "FROM citus_internal.local_blocked_processes()"); } int querySent = SendRemoteCommand(connection, queryString->data); @@ -226,7 +226,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) else if (!onlyDistributedTx && colCount != 11) { ereport(WARNING, (errmsg("unexpected number of columns from " - "citus_internal_local_blocked_processes"))); + "citus_internal.local_blocked_processes"))); continue; } @@ -559,7 +559,7 @@ BuildLocalWaitGraph(bool onlyDistributedTx) /* build list of starting procs */ for (int curBackend = 0; curBackend < totalProcs; curBackend++) { - PGPROC *currentProc = &ProcGlobal->allProcs[curBackend]; + PGPROC *currentProc = GetPGProcByNumber(curBackend); BackendData currentBackendData; if (currentProc->pid == 0) diff --git a/src/backend/distributed/transaction/remote_transaction.c b/src/backend/distributed/transaction/remote_transaction.c index 71b6a78dd..4c26e2478 100644 --- a/src/backend/distributed/transaction/remote_transaction.c +++ b/src/backend/distributed/transaction/remote_transaction.c @@ -107,6 +107,12 @@ bool IsMainDB = true; */ char *SuperuserRole = NULL; +/* + * IsMainDBCommandInXact shows if the query sent to the main database requires + * a transaction + */ +bool IsMainDBCommandInXact = true; + /* * start_management_transaction starts a management transaction @@ -190,7 +196,11 @@ RunCitusMainDBQuery(char *query) PostPortNumber, SuperuserRole, MainDb); - RemoteTransactionBegin(MainDBConnection); + + if (IsMainDBCommandInXact) + { + RemoteTransactionBegin(MainDBConnection); + } } SendRemoteCommand(MainDBConnection, query); diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 29f5b367e..9c7b45680 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ 
b/src/backend/distributed/transaction/transaction_management.c @@ -333,7 +333,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) * If this is a non-Citus main database we should try to commit the prepared * transactions created by the Citus main database on the worker nodes. */ - if (!IsMainDB && MainDBConnection != NULL) + if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact) { RunCitusMainDBQuery(COMMIT_MANAGEMENT_COMMAND_2PC); CleanCitusMainDBConnection(); @@ -533,7 +533,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) * main database query. So if some error happens on the distributed main * database query we wouldn't have committed the current query. */ - if (!IsMainDB && MainDBConnection != NULL) + if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact) { RunCitusMainDBQuery("COMMIT"); } diff --git a/src/backend/distributed/transaction/transaction_recovery.c b/src/backend/distributed/transaction/transaction_recovery.c index 653b962db..c31dc85a2 100644 --- a/src/backend/distributed/transaction/transaction_recovery.c +++ b/src/backend/distributed/transaction/transaction_recovery.c @@ -34,6 +34,7 @@ #include "utils/fmgroids.h" #include "utils/memutils.h" #include "utils/rel.h" +#include "utils/syscache.h" #include "utils/xid8.h" #include "pg_version_constants.h" @@ -261,11 +262,28 @@ RecoverWorkerTransactions(WorkerNode *workerNode) continue; } - /* Check if the transaction is created by an outer transaction from a non-main database */ bool outerXidIsNull = false; - Datum outerXidDatum = heap_getattr(heapTuple, - Anum_pg_dist_transaction_outerxid, - tupleDescriptor, &outerXidIsNull); + Datum outerXidDatum = 0; + if (EnableVersionChecks || + SearchSysCacheExistsAttName(DistTransactionRelationId(), "outer_xid")) + { + /* Check if the transaction is created by an outer transaction from a non-main database */ + outerXidDatum = heap_getattr(heapTuple, + Anum_pg_dist_transaction_outerxid, + tupleDescriptor, &outerXidIsNull); + } + else + { + /* + * Normally we don't try to recover prepared transactions when the + * binary version doesn't match the sql version. However, we skip + * those checks in regression tests by disabling + * citus.enable_version_checks. And when this is the case, while + * the C code looks for "outer_xid" attribute, pg_dist_transaction + * doesn't yet have it. + */ + Assert(!EnableVersionChecks); + } TransactionId outerXid = 0; if (!outerXidIsNull) diff --git a/src/backend/distributed/utils/citus_depended_object.c b/src/backend/distributed/utils/citus_depended_object.c index a160fcd56..7588f8594 100644 --- a/src/backend/distributed/utils/citus_depended_object.c +++ b/src/backend/distributed/utils/citus_depended_object.c @@ -465,8 +465,8 @@ static bool AnyObjectViolatesOwnership(DropStmt *dropStmt) { bool hasOwnershipViolation = false; - volatile ObjectAddress objectAddress = { 0 }; - Relation relation = NULL; + ObjectAddress objectAddress = { 0 }; + volatile Relation relation = NULL; ObjectType objectType = dropStmt->removeType; bool missingOk = dropStmt->missing_ok; @@ -480,8 +480,17 @@ AnyObjectViolatesOwnership(DropStmt *dropStmt) Node *object = NULL; foreach_ptr(object, dropStmt->objects) { + Relation rel = NULL; objectAddress = get_object_address(objectType, object, - &relation, AccessShareLock, missingOk); + &rel, AccessShareLock, missingOk); + + /* + * The object relation is qualified with volatile and its value is obtained from + * get_object_address(). 
Unless we can qualify the corresponding parameter of + * get_object_address() with volatile (this is a function defined in PostgreSQL), + * we cannot get rid of this assignment. + */ + relation = rel; if (OidIsValid(objectAddress.objectId)) { diff --git a/src/backend/distributed/utils/directory.c b/src/backend/distributed/utils/directory.c index bad585809..6701bf8fb 100644 --- a/src/backend/distributed/utils/directory.c +++ b/src/backend/distributed/utils/directory.c @@ -29,7 +29,7 @@ static bool FileIsLink(const char *filename, struct stat filestat); void CitusCreateDirectory(StringInfo directoryName) { - int makeOK = mkdir(directoryName->data, S_IRWXU); + int makeOK = MakePGDirectory(directoryName->data); if (makeOK != 0) { ereport(ERROR, (errcode_for_file_access(), diff --git a/src/backend/distributed/utils/replication_origin_session_utils.c b/src/backend/distributed/utils/replication_origin_session_utils.c index 370b061be..f96e23f8f 100644 --- a/src/backend/distributed/utils/replication_origin_session_utils.c +++ b/src/backend/distributed/utils/replication_origin_session_utils.c @@ -186,7 +186,7 @@ SetupReplicationOriginRemoteSession(MultiConnection *connection) { StringInfo replicationOriginSessionSetupQuery = makeStringInfo(); appendStringInfo(replicationOriginSessionSetupQuery, - "select pg_catalog.citus_internal_start_replication_origin_tracking();"); + "select citus_internal.start_replication_origin_tracking();"); ExecuteCriticalRemoteCommand(connection, replicationOriginSessionSetupQuery->data); connection->isReplicationOriginSessionSetup = true; @@ -205,7 +205,7 @@ ResetReplicationOriginRemoteSession(MultiConnection *connection) { StringInfo replicationOriginSessionResetQuery = makeStringInfo(); appendStringInfo(replicationOriginSessionResetQuery, - "select pg_catalog.citus_internal_stop_replication_origin_tracking();"); + "select citus_internal.stop_replication_origin_tracking();"); ExecuteCriticalRemoteCommand(connection, replicationOriginSessionResetQuery->data); connection->isReplicationOriginSessionSetup = false; @@ -229,7 +229,7 @@ IsRemoteReplicationOriginSessionSetup(MultiConnection *connection) StringInfo isReplicationOriginSessionSetupQuery = makeStringInfo(); appendStringInfo(isReplicationOriginSessionSetupQuery, - "SELECT pg_catalog.citus_internal_is_replication_origin_tracking_active()"); + "SELECT citus_internal.is_replication_origin_tracking_active()"); bool result = ExecuteRemoteCommandAndCheckResult(connection, isReplicationOriginSessionSetupQuery->data, diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index 13e88a16e..8ac269e43 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -707,13 +707,27 @@ SerializeNonCommutativeWrites(List *shardIntervalList, LOCKMODE lockMode) } List *replicatedShardList = NIL; - if (AnyTableReplicated(shardIntervalList, &replicatedShardList)) - { - if (ClusterHasKnownMetadataWorkers() && !IsFirstWorkerNode()) - { - LockShardListResourcesOnFirstWorker(lockMode, replicatedShardList); - } + bool anyTableReplicated = AnyTableReplicated(shardIntervalList, &replicatedShardList); + /* + * Acquire locks on the modified table. + * If the table is replicated, the locks are first acquired on the first worker node then locally. + * But if we're already on the first worker, acquiring on the first worker node and locally are the same operation. + * So we only acquire locally in that case. 
+ */ + if (anyTableReplicated && ClusterHasKnownMetadataWorkers() && !IsFirstWorkerNode()) + { + LockShardListResourcesOnFirstWorker(lockMode, replicatedShardList); + } + LockShardListResources(shardIntervalList, lockMode); + + /* + * Next, acquire locks on the reference tables that are referenced by a foreign key if there are any. + * Note that LockReferencedReferenceShardResources() first acquires locks on the first worker, + * then locally. + */ + if (anyTableReplicated) + { ShardInterval *firstShardInterval = (ShardInterval *) linitial(replicatedShardList); if (ReferenceTableShardId(firstShardInterval->shardId)) @@ -728,8 +742,6 @@ SerializeNonCommutativeWrites(List *shardIntervalList, LOCKMODE lockMode) LockReferencedReferenceShardResources(firstShardInterval->shardId, lockMode); } } - - LockShardListResources(shardIntervalList, lockMode); } diff --git a/src/backend/distributed/worker/worker_shard_visibility.c b/src/backend/distributed/worker/worker_shard_visibility.c index 49131ef6d..3725800c3 100644 --- a/src/backend/distributed/worker/worker_shard_visibility.c +++ b/src/backend/distributed/worker/worker_shard_visibility.c @@ -54,6 +54,7 @@ static bool ShouldHideShardsInternal(void); static bool IsPgBgWorker(void); static bool FilterShardsFromPgclass(Node *node, void *context); static Node * CreateRelationIsAKnownShardFilter(int pgClassVarno); +static bool HasRangeTableRef(Node *node, int *varno); PG_FUNCTION_INFO_V1(citus_table_is_visible); PG_FUNCTION_INFO_V1(relation_is_a_known_shard); @@ -421,8 +422,8 @@ IsPgBgWorker(void) /* - * FilterShardsFromPgclass adds a NOT relation_is_a_known_shard(oid) filter - * to the security quals of pg_class RTEs. + * FilterShardsFromPgclass adds a "relation_is_a_known_shard(oid) IS NOT TRUE" + * filter to the quals of queries that query pg_class. */ static bool FilterShardsFromPgclass(Node *node, void *context) @@ -456,12 +457,35 @@ FilterShardsFromPgclass(Node *node, void *context) continue; } + /* + * Skip if pg_class is not actually queried. This is possible on + * INSERT statements that insert into pg_class. + */ + if (!expression_tree_walker((Node *) query->jointree->fromlist, + HasRangeTableRef, &varno)) + { + /* the query does not actually read from pg_class, so there is nothing to filter */ + continue; + } + /* make sure the expression is in the right memory context */ MemoryContext originalContext = MemoryContextSwitchTo(queryContext); - /* add NOT relation_is_a_known_shard(oid) to the security quals of the RTE */ - rangeTableEntry->securityQuals = - list_make1(CreateRelationIsAKnownShardFilter(varno)); + + /* add relation_is_a_known_shard(oid) IS NOT TRUE to the quals of the query */ + Node *newQual = CreateRelationIsAKnownShardFilter(varno); + Node *oldQuals = query->jointree->quals; + if (oldQuals) + { + query->jointree->quals = (Node *) makeBoolExpr( + AND_EXPR, + list_make2(oldQuals, newQual), + -1); + } + else + { + query->jointree->quals = newQual; + } MemoryContextSwitchTo(originalContext); } @@ -473,9 +497,37 @@ FilterShardsFromPgclass(Node *node, void *context) } +/* + * HasRangeTableRef is a walker for expression_tree_walker that checks whether a + * RangeTblRef with the given varno is present in a fromlist.
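A quick illustration of the three-valued logic the new pg_class filter relies on, in plain SQL with no Citus objects involved: NOT NULL evaluates to NULL (the row would be filtered out), while NULL IS NOT TRUE evaluates to true (the row is kept), which is why the IS NOT TRUE form is the safer choice when the oid side of an outer join can be NULL:

SELECT (NOT NULL::boolean)         AS not_form,     -- NULL
       (NULL::boolean IS NOT TRUE) AS is_not_true;  -- t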
+ */ +static bool +HasRangeTableRef(Node *node, int *varno) +{ + if (node == NULL) + { + return false; + } + + if (IsA(node, RangeTblRef)) + { + RangeTblRef *rangeTblRef = (RangeTblRef *) node; + return rangeTblRef->rtindex == *varno; + } + + return expression_tree_walker(node, HasRangeTableRef, varno); +} + + /* * CreateRelationIsAKnownShardFilter constructs an expression of the form: - * NOT pg_catalog.relation_is_a_known_shard(oid) + * pg_catalog.relation_is_a_known_shard(oid) IS NOT TRUE + * + * The difference between "NOT pg_catalog.relation_is_a_known_shard(oid)" and + * "pg_catalog.relation_is_a_known_shard(oid) IS NOT TRUE" is that the former + * will return FALSE if the function returns NULL, while the second will return + * TRUE. This difference is important in the case of outer joins, because this + * filter might be applied on an oid that is then NULL. */ static Node * CreateRelationIsAKnownShardFilter(int pgClassVarno) @@ -496,9 +548,9 @@ CreateRelationIsAKnownShardFilter(int pgClassVarno) funcExpr->location = -1; funcExpr->args = list_make1(oidVar); - BoolExpr *notExpr = makeNode(BoolExpr); - notExpr->boolop = NOT_EXPR; - notExpr->args = list_make1(funcExpr); + BooleanTest *notExpr = makeNode(BooleanTest); + notExpr->booltesttype = IS_NOT_TRUE; + notExpr->arg = (Expr *) funcExpr; notExpr->location = -1; return (Node *) notExpr; diff --git a/src/backend/distributed/worker/worker_sql_task_protocol.c b/src/backend/distributed/worker/worker_sql_task_protocol.c index 2cf48fc6f..708fee15d 100644 --- a/src/backend/distributed/worker/worker_sql_task_protocol.c +++ b/src/backend/distributed/worker/worker_sql_task_protocol.c @@ -126,7 +126,6 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation, const char *nullPrintCharacter = "\\N"; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); /* use the memory context that was in place when the DestReceiver was created */ MemoryContext oldContext = MemoryContextSwitchTo(taskFileDest->memoryContext); @@ -148,8 +147,7 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation, taskFileDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit( taskFileDest->filePath, - fileFlags, - fileMode)); + fileFlags)); if (copyOutState->binary) { diff --git a/src/include/distributed/backend_data.h b/src/include/distributed/backend_data.h index 8014fe5a6..5b3fcf2ac 100644 --- a/src/include/distributed/backend_data.h +++ b/src/include/distributed/backend_data.h @@ -61,6 +61,7 @@ extern void AssignGlobalPID(const char *applicationName); extern uint64 GetGlobalPID(void); extern void SetBackendDataDatabaseId(void); extern void SetBackendDataGlobalPID(uint64 gpid); +extern void SetBackendDataDistributedCommandOriginator(bool distributedCommandOriginator); extern uint64 ExtractGlobalPID(const char *applicationName); extern int ExtractNodeIdFromGlobalPID(uint64 globalPID, bool missingOk); extern int ExtractProcessIdFromGlobalPID(uint64 globalPID); diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index de15553e7..084308a8f 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -104,6 +104,10 @@ typedef struct DistributeObjectOps const DistributeObjectOps * GetDistributeObjectOps(Node *node); +/* functions to support node-wide object management commands from non-main dbs */ +extern bool RunPreprocessNonMainDBCommand(Node *parsetree); +extern void RunPostprocessNonMainDBCommand(Node *parsetree); + /* * Flags 
that can be passed to GetForeignKeyOids to indicate * which foreign key constraint OIDs are to be extracted diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 9046c7309..52fcf7091 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -75,6 +75,15 @@ typedef struct DDLJob const char *metadataSyncCommand; List *taskList; /* worker DDL tasks to execute */ + + /* + * Only applicable when any of the tasks cannot be executed in a + * transaction block. + * + * Controls whether to emit a warning within the utility hook in case of a + * failure. + */ + bool warnForPartialFailure; } DDLJob; extern ProcessUtility_hook_type PrevProcessUtility; @@ -94,7 +103,8 @@ extern void ProcessUtilityParseTree(Node *node, const char *queryString, extern void MarkInvalidateForeignKeyGraph(void); extern void InvalidateForeignKeyGraphForDDL(void); extern List * DDLTaskList(Oid relationId, const char *commandString); -extern List * NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands); +extern List * NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands, + bool warnForPartialFailure); extern List * NodeDDLTaskList(TargetWorkerSet targets, List *commands); extern bool AlterTableInProgress(void); extern bool DropSchemaOrDBInProgress(void); diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 437a9fd8e..4d4005c19 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -121,6 +121,8 @@ extern void AppendGrantedByInGrant(StringInfo buf, GrantStmt *stmt); extern void AppendGrantSharedPrefix(StringInfo buf, GrantStmt *stmt); extern void AppendGrantSharedSuffix(StringInfo buf, GrantStmt *stmt); +extern void AppendColumnNameList(StringInfo buf, List *columns); + /* Common deparser utils */ typedef struct DefElemOptionFormat diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index 9f4c0a24b..617eed705 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -130,8 +130,13 @@ extern List * IdentitySequenceDependencyCommandList(Oid targetRelationId); extern List * DDLCommandsForSequence(Oid sequenceOid, char *ownerName); extern List * GetSequencesFromAttrDef(Oid attrdefOid); +#if PG_VERSION_NUM < PG_VERSION_15 +ObjectAddress GetAttrDefaultColumnAddress(Oid attrdefoid); +#endif +extern List * GetAttrDefsFromSequence(Oid seqOid); extern void GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList, AttrNumber attnum, char depType); +extern List * GetDependentRelationsWithSequence(Oid seqId, char depType); extern List * GetDependentFunctionsWithRelation(Oid relationId); extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum); extern void SetLocalEnableMetadataSync(bool state); @@ -189,7 +194,7 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context); #define WORKER_DROP_ALL_SHELL_TABLES \ "CALL pg_catalog.worker_drop_all_shell_tables(%s)" #define CITUS_INTERNAL_MARK_NODE_NOT_SYNCED \ - "SELECT citus_internal_mark_node_not_synced(%d, %d)" + "SELECT citus_internal.mark_node_not_synced(%d, %d)" #define REMOVE_ALL_CITUS_TABLES_COMMAND \ "SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition" diff --git a/src/include/distributed/query_colocation_checker.h b/src/include/distributed/query_colocation_checker.h index 2a46d364c..485e4a033 
100644 --- a/src/include/distributed/query_colocation_checker.h +++ b/src/include/distributed/query_colocation_checker.h @@ -39,5 +39,7 @@ extern Query * WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation, List *requiredAttributes, RTEPermissionInfo *perminfo); extern List * CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes); +extern List * CreateFilteredTargetListForRelation(Oid relationId, + List *requiredAttributes); #endif /* QUERY_COLOCATION_CHECKER_H */ diff --git a/src/include/distributed/remote_transaction.h b/src/include/distributed/remote_transaction.h index 2b61c25bd..45e2eba70 100644 --- a/src/include/distributed/remote_transaction.h +++ b/src/include/distributed/remote_transaction.h @@ -152,5 +152,6 @@ extern bool IsMainDB; extern char *SuperuserRole; extern char *MainDb; extern struct MultiConnection *MainDBConnection; +extern bool IsMainDBCommandInXact; #endif /* REMOTE_TRANSACTION_H */ diff --git a/src/include/distributed/shard_cleaner.h b/src/include/distributed/shard_cleaner.h index e7d3dea1b..7609bd900 100644 --- a/src/include/distributed/shard_cleaner.h +++ b/src/include/distributed/shard_cleaner.h @@ -41,7 +41,8 @@ typedef enum CleanupObject CLEANUP_OBJECT_SUBSCRIPTION = 2, CLEANUP_OBJECT_REPLICATION_SLOT = 3, CLEANUP_OBJECT_PUBLICATION = 4, - CLEANUP_OBJECT_USER = 5 + CLEANUP_OBJECT_USER = 5, + CLEANUP_OBJECT_DATABASE = 6 } CleanupObject; /* @@ -81,16 +82,16 @@ typedef enum CleanupPolicy extern OperationId RegisterOperationNeedingCleanup(void); /* - * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry + * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry * as part of the current transaction. * * This is primarily useful for deferred cleanup (CLEANUP_DEFERRED_ON_SUCCESS) - * scenarios, since the records would roll back in case of failure. + * scenarios, since the records would roll back in case of failure. And for the + * same reason, always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS. */ -extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy); +extern void InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId); /* * InsertCleanupRecordInSeparateTransaction inserts a new pg_dist_cleanup entry @@ -99,10 +100,10 @@ extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, * This is used in scenarios where we need to cleanup resources on operation * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE). 
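To see what the deferred cleanup machinery has recorded after an operation, one might simply inspect the catalog referenced in the comments above (a sketch; the exact column set depends on the installed Citus version):

SELECT * FROM pg_dist_cleanup;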
*/ -extern void InsertCleanupRecordInSubtransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy); +extern void InsertCleanupRecordOutsideTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId, + CleanupPolicy policy); /* * FinalizeOperationNeedingCleanupOnSuccess is be called by an operation to signal diff --git a/src/include/distributed/transmit.h b/src/include/distributed/transmit.h index b86fd9150..9c2ab87ab 100644 --- a/src/include/distributed/transmit.h +++ b/src/include/distributed/transmit.h @@ -21,7 +21,8 @@ /* Function declarations for transmitting files between two nodes */ extern void RedirectCopyDataToRegularFile(const char *filename); extern void SendRegularFile(const char *filename); -extern File FileOpenForTransmit(const char *filename, int fileFlags, int fileMode); +extern File FileOpenForTransmit(const char *filename, int fileFlags); +extern File FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode); #endif /* TRANSMIT_H */ diff --git a/src/test/regress/Pipfile b/src/test/regress/Pipfile index d4b2cc39f..8811bbd8c 100644 --- a/src/test/regress/Pipfile +++ b/src/test/regress/Pipfile @@ -5,7 +5,7 @@ verify_ssl = true [packages] mitmproxy = {editable = true, ref = "main", git = "https://github.com/citusdata/mitmproxy.git"} -construct = "==2.9.45" +construct = "*" docopt = "==0.6.2" cryptography = ">=41.0.4" pytest = "*" @@ -16,6 +16,7 @@ pytest-timeout = "*" pytest-xdist = "*" pytest-repeat = "*" pyyaml = "*" +werkzeug = "==2.3.7" [dev-packages] black = "*" diff --git a/src/test/regress/Pipfile.lock b/src/test/regress/Pipfile.lock index bdb42a1c3..fb82a6573 100644 --- a/src/test/regress/Pipfile.lock +++ b/src/test/regress/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "b92bf682aeeea1a66a16beaf78584a5318fd0ae908ce85c7e2a4807aa2bee532" + "sha256": "f8db86383082539f626f1402e720f5f2e3f9718b44a8f26110cf9f52e7ca46bc" }, "pipfile-spec": 6, "requires": { @@ -119,11 +119,11 @@ }, "certifi": { "hashes": [ - "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082", - "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9" + "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f", + "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1" ], "markers": "python_version >= '3.6'", - "version": "==2023.7.22" + "version": "==2024.2.2" }, "cffi": { "hashes": [ @@ -180,7 +180,7 @@ "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" ], - "markers": "python_version >= '3.8'", + "markers": "platform_python_implementation != 'PyPy'", "version": "==1.16.0" }, "click": { @@ -193,40 +193,51 @@ }, "construct": { "hashes": [ - "sha256:2271a0efd0798679dea825ff47e22a4c550456a5db0ba8baa82f7eae0af0118c" + "sha256:4d2472f9684731e58cc9c56c463be63baa1447d674e0d66aeb5627b22f512c29", + "sha256:c80be81ef595a1a821ec69dc16099550ed22197615f4320b57cc9ce2a672cb30" ], "index": "pypi", - "version": "==2.9.45" + "markers": "python_version >= '3.6'", + "version": "==2.10.70" }, "cryptography": { "hashes": [ - "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67", - "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311", - "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8", - "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13", - 
"sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143", - "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f", - "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829", - "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd", - "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397", - "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac", - "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d", - "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a", - "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839", - "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e", - "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6", - "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9", - "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860", - "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca", - "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91", - "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d", - "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714", - "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb", - "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f" + "sha256:04859aa7f12c2b5f7e22d25198ddd537391f1695df7057c8700f71f26f47a129", + "sha256:069d2ce9be5526a44093a0991c450fe9906cdf069e0e7cd67d9dee49a62b9ebe", + "sha256:0d3ec384058b642f7fb7e7bff9664030011ed1af8f852540c76a1317a9dd0d20", + "sha256:0fab2a5c479b360e5e0ea9f654bcebb535e3aa1e493a715b13244f4e07ea8eec", + "sha256:0fea01527d4fb22ffe38cd98951c9044400f6eff4788cf52ae116e27d30a1ba3", + "sha256:1b797099d221df7cce5ff2a1d272761d1554ddf9a987d3e11f6459b38cd300fd", + "sha256:1e935c2900fb53d31f491c0de04f41110351377be19d83d908c1fd502ae8daa5", + "sha256:20100c22b298c9eaebe4f0b9032ea97186ac2555f426c3e70670f2517989543b", + "sha256:20180da1b508f4aefc101cebc14c57043a02b355d1a652b6e8e537967f1e1b46", + "sha256:25b09b73db78facdfd7dd0fa77a3f19e94896197c86e9f6dc16bce7b37a96504", + "sha256:2619487f37da18d6826e27854a7f9d4d013c51eafb066c80d09c63cf24505306", + "sha256:2eb6368d5327d6455f20327fb6159b97538820355ec00f8cc9464d617caecead", + "sha256:35772a6cffd1f59b85cb670f12faba05513446f80352fe811689b4e439b5d89e", + "sha256:39d5c93e95bcbc4c06313fc6a500cee414ee39b616b55320c1904760ad686938", + "sha256:3d96ea47ce6d0055d5b97e761d37b4e84195485cb5a38401be341fabf23bc32a", + "sha256:4dcab7c25e48fc09a73c3e463d09ac902a932a0f8d0c568238b3696d06bf377b", + "sha256:5fbf0f3f0fac7c089308bd771d2c6c7b7d53ae909dce1db52d8e921f6c19bb3a", + "sha256:6c25e1e9c2ce682d01fc5e2dde6598f7313027343bd14f4049b82ad0402e52cd", + "sha256:762f3771ae40e111d78d77cbe9c1035e886ac04a234d3ee0856bf4ecb3749d54", + "sha256:90147dad8c22d64b2ff7331f8d4cddfdc3ee93e4879796f837bdbb2a0b141e0c", + "sha256:935cca25d35dda9e7bd46a24831dfd255307c55a07ff38fd1a92119cffc34857", + "sha256:93fbee08c48e63d5d1b39ab56fd3fdd02e6c2431c3da0f4edaf54954744c718f", + "sha256:9541c69c62d7446539f2c1c06d7046aef822940d248fa4b8962ff0302862cc1f", + "sha256:c23f03cfd7d9826cdcbad7850de67e18b4654179e01fe9bc623d37c2638eb4ef", + "sha256:c3d1f5a1d403a8e640fa0887e9f7087331abb3f33b0f2207d2cc7f213e4a864c", + "sha256:d1998e545081da0ab276bcb4b33cce85f775adb86a516e8f55b3dac87f469548", + "sha256:d5cf11bc7f0b71fb71af26af396c83dfd3f6eed56d4b6ef95d57867bf1e4ba65", + 
"sha256:db0480ffbfb1193ac4e1e88239f31314fe4c6cdcf9c0b8712b55414afbf80db4", + "sha256:de4ae486041878dc46e571a4c70ba337ed5233a1344c14a0790c4c4be4bbb8b4", + "sha256:de5086cd475d67113ccb6f9fae6d8fe3ac54a4f9238fd08bfdb07b03d791ff0a", + "sha256:df34312149b495d9d03492ce97471234fd9037aa5ba217c2a6ea890e9166f151", + "sha256:ead69ba488f806fe1b1b4050febafdbf206b81fa476126f3e16110c818bac396" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==41.0.4" + "version": "==42.0.3" }, "docopt": { "hashes": [ @@ -237,11 +248,11 @@ }, "exceptiongroup": { "hashes": [ - "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9", - "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3" + "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14", + "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68" ], "markers": "python_version < '3.11'", - "version": "==1.1.3" + "version": "==1.2.0" }, "execnet": { "hashes": [ @@ -253,12 +264,12 @@ }, "filelock": { "hashes": [ - "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4", - "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd" + "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e", + "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.12.4" + "version": "==3.13.1" }, "flask": { "hashes": [ @@ -318,11 +329,11 @@ }, "jinja2": { "hashes": [ - "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", - "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" + "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", + "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" ], "markers": "python_version >= '3.7'", - "version": "==3.1.2" + "version": "==3.1.3" }, "kaitaistruct": { "hashes": [ @@ -342,69 +353,69 @@ }, "markupsafe": { "hashes": [ - "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e", - "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e", - "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431", - "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686", - "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c", - "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559", - "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc", - "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb", - "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939", - "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c", - "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0", - "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4", - "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9", - "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575", - "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba", - "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d", - "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd", - "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3", - "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00", - "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155", - 
"sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac", - "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52", - "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f", - "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8", - "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b", - "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007", - "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24", - "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea", - "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198", - "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0", - "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee", - "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be", - "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2", - "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1", - "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707", - "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6", - "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c", - "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58", - "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823", - "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779", - "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636", - "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c", - "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad", - "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee", - "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc", - "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2", - "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48", - "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7", - "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e", - "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b", - "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa", - "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5", - "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e", - "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb", - "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9", - "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57", - "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc", - "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc", - "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2", - "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11" + "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf", + "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff", + "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", + "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3", + "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532", + "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f", + 
"sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", + "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df", + "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4", + "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", + "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", + "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", + "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8", + "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371", + "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2", + "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465", + "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52", + "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6", + "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", + "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad", + "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", + "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0", + "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029", + "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", + "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a", + "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", + "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5", + "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", + "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf", + "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9", + "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", + "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", + "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3", + "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", + "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46", + "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc", + "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a", + "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", + "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900", + "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", + "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea", + "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", + "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5", + "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e", + "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", + "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f", + "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50", + "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", + "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", + "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4", + "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff", + "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2", + 
"sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46", + "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", + "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf", + "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", + "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5", + "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab", + "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd", + "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68" ], "markers": "python_version >= '3.7'", - "version": "==2.1.3" + "version": "==2.1.5" }, "mitmproxy": { "editable": true, @@ -491,11 +502,11 @@ }, "pluggy": { "hashes": [ - "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12", - "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7" + "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", + "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" ], "markers": "python_version >= '3.8'", - "version": "==1.3.0" + "version": "==1.4.0" }, "protobuf": { "hashes": [ @@ -526,12 +537,12 @@ }, "psycopg": { "hashes": [ - "sha256:7542c45810ea16356e5126c9b4291cbc3802aa326fcbba09ff154fe380de29be", - "sha256:cd711edb64b07d7f8a233c365806caf7e55bbe7cbbd8d5c680f672bb5353c8d5" + "sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b", + "sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.1.11" + "version": "==3.1.18" }, "publicsuffix2": { "hashes": [ @@ -542,11 +553,11 @@ }, "pyasn1": { "hashes": [ - "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57", - "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde" + "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58", + "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==0.5.0" + "version": "==0.5.1" }, "pycparser": { "hashes": [ @@ -557,11 +568,11 @@ }, "pyopenssl": { "hashes": [ - "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2", - "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac" + "sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf", + "sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3" ], - "markers": "python_version >= '3.6'", - "version": "==23.2.0" + "markers": "python_version >= '3.7'", + "version": "==24.0.0" }, "pyparsing": { "hashes": [ @@ -579,48 +590,48 @@ }, "pytest": { "hashes": [ - "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002", - "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069" + "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", + "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==7.4.2" + "markers": "python_version >= '3.8'", + "version": "==8.0.0" }, "pytest-asyncio": { "hashes": [ - "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d", - "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b" + "sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675", + 
"sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==0.21.1" + "markers": "python_version >= '3.8'", + "version": "==0.23.5" }, "pytest-repeat": { "hashes": [ - "sha256:4474a7d9e9137f6d8cc8ae297f8c4168d33c56dd740aa78cfffe562557e6b96e", - "sha256:5cd3289745ab3156d43eb9c8e7f7d00a926f3ae5c9cf425bec649b2fe15bad5b" - ], - "index": "pypi", - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.9.1" - }, - "pytest-timeout": { - "hashes": [ - "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9", - "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6" - ], - "index": "pypi", - "markers": "python_version >= '3.6'", - "version": "==2.1.0" - }, - "pytest-xdist": { - "hashes": [ - "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93", - "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2" + "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed", + "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.3.1" + "version": "==0.9.3" + }, + "pytest-timeout": { + "hashes": [ + "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90", + "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==2.2.0" + }, + "pytest-xdist": { + "hashes": [ + "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a", + "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.5.0" }, "pyyaml": { "hashes": [ @@ -653,6 +664,7 @@ "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", + "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", @@ -693,36 +705,37 @@ "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe", "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62", "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1", "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", - "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", 
"sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", - "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", - "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6", "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", - "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c", "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f", "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", - "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", @@ -731,7 +744,6 @@ "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", - "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", @@ -760,28 +772,28 @@ }, "tornado": { "hashes": [ - "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f", - "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5", - "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d", - "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3", - "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2", - "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a", - "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16", - "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a", - "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17", - "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0", - 
"sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe" + "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0", + "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63", + "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263", + "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052", + "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f", + "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee", + "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78", + "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579", + "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212", + "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e", + "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2" ], "markers": "python_version >= '3.8'", - "version": "==6.3.3" + "version": "==6.4" }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" }, "urwid": { "hashes": [ @@ -791,12 +803,12 @@ }, "werkzeug": { "hashes": [ - "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", - "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" + "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", + "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.0.1" + "version": "==2.3.7" }, "wsproto": { "hashes": [ @@ -864,40 +876,40 @@ "develop": { "attrs": { "hashes": [ - "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04", - "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015" + "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30", + "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1" ], "markers": "python_version >= '3.7'", - "version": "==23.1.0" + "version": "==23.2.0" }, "black": { "hashes": [ - "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f", - "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7", - "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100", - "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573", - "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d", - "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f", - "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9", - "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300", - "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948", - "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325", - "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9", - "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71", - "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186", - "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f", - 
"sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe", - "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855", - "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80", - "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393", - "sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c", - "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204", - "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377", - "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301" + "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8", + "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8", + "sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd", + "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9", + "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31", + "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92", + "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f", + "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29", + "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4", + "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693", + "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218", + "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a", + "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23", + "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0", + "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982", + "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894", + "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540", + "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430", + "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b", + "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2", + "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6", + "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==23.9.1" + "version": "==24.2.0" }, "click": { "hashes": [ @@ -909,30 +921,30 @@ }, "flake8": { "hashes": [ - "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23", - "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5" + "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132", + "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==6.1.0" + "version": "==7.0.0" }, "flake8-bugbear": { "hashes": [ - "sha256:90cf04b19ca02a682feb5aac67cae8de742af70538590509941ab10ae8351f71", - "sha256:b182cf96ea8f7a8595b2f87321d7d9b28728f4d9c3318012d896543d19742cb5" + "sha256:663ef5de80cd32aacd39d362212983bc4636435a6f83700b4ed35acbd0b7d1b8", + "sha256:f9cb5f2a9e792dd80ff68e89a14c12eed8620af8b41a49d823b7a33064ac9658" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==23.9.16" + "version": "==24.2.6" }, "isort": { "hashes": [ - "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504", - "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6" + 
"sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", + "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6" ], "index": "pypi", "markers": "python_full_version >= '3.8.0'", - "version": "==5.12.0" + "version": "==5.13.2" }, "mccabe": { "hashes": [ @@ -960,19 +972,19 @@ }, "pathspec": { "hashes": [ - "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20", - "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3" + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" ], - "markers": "python_version >= '3.7'", - "version": "==0.11.2" + "markers": "python_version >= '3.8'", + "version": "==0.12.1" }, "platformdirs": { "hashes": [ - "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", - "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" + "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068", + "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768" ], - "markers": "python_version >= '3.7'", - "version": "==3.11.0" + "markers": "python_version >= '3.8'", + "version": "==4.2.0" }, "pycodestyle": { "hashes": [ @@ -984,11 +996,11 @@ }, "pyflakes": { "hashes": [ - "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774", - "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc" + "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", + "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a" ], "markers": "python_version >= '3.8'", - "version": "==3.1.0" + "version": "==3.2.0" }, "tomli": { "hashes": [ @@ -1000,11 +1012,11 @@ }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" } } } diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index fb51bdc33..2dc5d6e88 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -32,6 +32,7 @@ s/"t2_[0-9]+"/"t2_xxxxxxx"/g # shard table names for MERGE tests s/merge_schema\.([_a-z0-9]+)_40[0-9]+ /merge_schema.\1_xxxxxxx /g s/pgmerge_schema\.([_a-z0-9]+)_40[0-9]+ /pgmerge_schema.\1_xxxxxxx /g +s/merge_vcore_schema\.([_a-z0-9]+)_40[0-9]+ /pgmerge_schema.\1_xxxxxxx /g # shard table names for multi_subquery s/ keyval(1|2|ref)_[0-9]+ / keyval\1_xxxxxxx /g diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py index 2135a0eba..6c09e0b38 100644 --- a/src/test/regress/citus_tests/common.py +++ b/src/test/regress/citus_tests/common.py @@ -294,6 +294,9 @@ def _run_pg_regress( output_dir, "--use-existing", ] + if PG_MAJOR_VERSION >= 16: + command.append("--expecteddir") + command.append(output_dir) if extra_tests != "": command.append(extra_tests) diff --git a/src/test/regress/expected/alter_database_propagation.out b/src/test/regress/expected/alter_database_propagation.out index f01d39ab9..5c45a25e2 100644 --- a/src/test/regress/expected/alter_database_propagation.out +++ b/src/test/regress/expected/alter_database_propagation.out @@ -140,7 +140,12 @@ DETAIL: on server 
postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing ALTER DATABASE regression RESET lock_timeout DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx set citus.enable_create_database_propagation=on; +SET citus.next_operation_id TO 3000; create database "regression!'2"; +NOTICE: issuing ALTER DATABASE citus_temp_database_3000_0 RENAME TO "regression!'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_3000_0 RENAME TO "regression!'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx alter database "regression!'2" with CONNECTION LIMIT 100; NOTICE: issuing ALTER DATABASE "regression!'2" WITH CONNECTION LIMIT 100; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -189,7 +194,12 @@ alter DATABASE local_regression rename to local_regression2; drop database local_regression2; set citus.enable_create_database_propagation=on; drop database regression3; +SET citus.next_operation_id TO 3100; create database "regression!'4"; +NOTICE: issuing ALTER DATABASE citus_temp_database_3100_0 RENAME TO "regression!'4" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_3100_0 RENAME TO "regression!'4" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SELECT result FROM run_command_on_all_nodes( $$ ALTER TABLESPACE alter_db_tablespace RENAME TO "ts-needs\!escape" diff --git a/src/test/regress/expected/alter_table_add_column.out b/src/test/regress/expected/alter_table_add_column.out index 61e7319d9..0408aeeab 100644 --- a/src/test/regress/expected/alter_table_add_column.out +++ b/src/test/regress/expected/alter_table_add_column.out @@ -44,6 +44,15 @@ ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_8 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name CHECK (check_expression); ALTER TABLE referencing ADD COLUMN test_8 integer CONSTRAINT check_test_8 CHECK (test_8 > 0); +-- error out properly even if the REFERENCES does not include the column list of the referenced table +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced; +ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints +DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names +HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_10 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name FOREIGN KEY (test_10) REFERENCES referenced; +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced(int_col); +ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints +DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names +HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_10 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name FOREIGN KEY (test_10) REFERENCES referenced (int_col ); -- try to add test_6 again, but with IF NOT EXISTS ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 text; 
NOTICE: column "test_6" of relation "referencing" already exists, skipping diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index b9788e3b6..4ddbaae3f 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -3,7 +3,7 @@ -- For versions >= 15, pg15_create_drop_database_propagation.sql is used. -- For versions >= 16, pg16_create_drop_database_propagation.sql is used. -- Test the UDF that we use to issue database command during metadata sync. -SELECT pg_catalog.citus_internal_database_command(null); +SELECT citus_internal.database_command(null); ERROR: This is an internal Citus function can only be used in a distributed transaction CREATE ROLE test_db_commands WITH LOGIN; ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_db_commands'; @@ -21,22 +21,22 @@ SELECT pg_sleep(0.1); SET ROLE test_db_commands; -- fails on null input -SELECT pg_catalog.citus_internal_database_command(null); +SELECT citus_internal.database_command(null); ERROR: command cannot be NULL -- fails on non create / drop db command -SELECT pg_catalog.citus_internal_database_command('CREATE TABLE foo_bar(a int)'); -ERROR: citus_internal_database_command() can only be used for CREATE DATABASE command by Citus. -SELECT pg_catalog.citus_internal_database_command('SELECT 1'); -ERROR: citus_internal_database_command() can only be used for CREATE DATABASE command by Citus. -SELECT pg_catalog.citus_internal_database_command('asfsfdsg'); +SELECT citus_internal.database_command('CREATE TABLE foo_bar(a int)'); +ERROR: citus_internal.database_command() can only be used for CREATE DATABASE command by Citus. +SELECT citus_internal.database_command('SELECT 1'); +ERROR: citus_internal.database_command() can only be used for CREATE DATABASE command by Citus. 
+SELECT citus_internal.database_command('asfsfdsg'); ERROR: syntax error at or near "asfsfdsg" -SELECT pg_catalog.citus_internal_database_command(''); +SELECT citus_internal.database_command(''); ERROR: cannot execute multiple utility events RESET ROLE; ALTER ROLE test_db_commands nocreatedb; SET ROLE test_db_commands; --- make sure that pg_catalog.citus_internal_database_command doesn't cause privilege escalation -SELECT pg_catalog.citus_internal_database_command('CREATE DATABASE no_permissions'); +-- make sure that citus_internal.database_command doesn't cause privilege escalation +SELECT citus_internal.database_command('CREATE DATABASE no_permissions'); ERROR: permission denied to create database RESET ROLE; DROP USER test_db_commands; @@ -427,11 +427,16 @@ SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER B --tests for special characters in database name set citus.enable_create_database_propagation=on; SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; +set citus.grep_remote_commands = '%DATABASE%'; +SET citus.next_operation_id TO 2000; create database "mydatabase#1'2"; -NOTICE: issuing CREATE DATABASE "mydatabase#1'2" +NOTICE: issuing CREATE DATABASE citus_temp_database_2000_0 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE DATABASE "mydatabase#1'2" +NOTICE: issuing CREATE DATABASE citus_temp_database_2000_0 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_2000_0 RENAME TO "mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_2000_0 RENAME TO "mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx set citus.grep_remote_commands = '%DROP DATABASE%'; drop database if exists "mydatabase#1'2"; @@ -1264,6 +1269,95 @@ SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FRO DROP DATABASE no_createdb; DROP USER no_createdb; +-- Test a failure scenario by trying to create a distributed database that +-- already exists on one of the nodes. +\c - - - :worker_1_port +CREATE DATABASE "test_\!failure"; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
+\c - - - :master_port +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE "test_\!failure"; +ERROR: database "test_\!failure" already exists +CONTEXT: while executing command on localhost:xxxxx +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); + database_cleanedup_on_node +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_\\!failure", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE "test_\!failure1"; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
+\c - - - :worker_1_port +DROP DATABASE "test_\!failure"; +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE "test_\!failure1"; +ERROR: database "test_\!failure1" already exists +CONTEXT: while executing command on localhost:xxxxx +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); + database_cleanedup_on_node +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "test_\\!failure1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :master_port +-- Before dropping local "test_\!failure1" database, test a failure scenario +-- by trying to create a distributed database that already exists "on local +-- node" this time. 
+SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE "test_\!failure1"; +ERROR: database "test_\!failure1" already exists +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); + database_cleanedup_on_node +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "test_\\!failure1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO OFF; +DROP DATABASE "test_\!failure1"; SET citus.enable_create_database_propagation TO ON; --clean up resources created by this test -- DROP TABLESPACE is not supported, so we need to drop it manually. diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15.out b/src/test/regress/expected/create_drop_database_propagation_pg15.out index 9a501558a..7e76d87f3 100644 --- a/src/test/regress/expected/create_drop_database_propagation_pg15.out +++ b/src/test/regress/expected/create_drop_database_propagation_pg15.out @@ -78,5 +78,11 @@ SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER B worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} (3 rows) +\c test_locale_provider - - :worker_2_port +set citus.enable_create_database_propagation to on; +create database unsupported_option_from_non_main_db with oid = 12345; +ERROR: CREATE DATABASE option "oid" is not supported +\c regression - - :master_port +set citus.enable_create_database_propagation to on; drop database test_locale_provider; \c - - - :master_port diff --git a/src/test/regress/expected/create_ref_dist_from_citus_local.out b/src/test/regress/expected/create_ref_dist_from_citus_local.out index dc67400e0..cce7081b0 100644 --- a/src/test/regress/expected/create_ref_dist_from_citus_local.out +++ b/src/test/regress/expected/create_ref_dist_from_citus_local.out @@ -369,9 +369,9 @@ ROLLBACK; \set VERBOSITY DEFAULT -- Test the UDFs that we use to convert Citus local tables to single-shard tables and -- reference tables. 
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, true); ERROR: This is an internal Citus function can only be used in a distributed transaction -SELECT pg_catalog.citus_internal_delete_placement_metadata(1); +SELECT citus_internal.delete_placement_metadata(1); ERROR: This is an internal Citus function can only be used in a distributed transaction CREATE ROLE test_user_create_ref_dist WITH LOGIN; GRANT ALL ON SCHEMA create_ref_dist_from_citus_local TO test_user_create_ref_dist; @@ -393,15 +393,15 @@ SET citus.next_shard_id TO 1850000; SET citus.next_placement_id TO 8510000; SET citus.shard_replication_factor TO 1; SET search_path TO create_ref_dist_from_citus_local; -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(null, 't', 1, true); ERROR: relation_id cannot be NULL -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, null, 1, true); ERROR: replication_model cannot be NULL -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', null, true); ERROR: colocation_id cannot be NULL -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, null); ERROR: auto_converted cannot be NULL -SELECT pg_catalog.citus_internal_delete_placement_metadata(null); +SELECT citus_internal.delete_placement_metadata(null); ERROR: placement_id cannot be NULL CREATE TABLE udf_test (col_1 int); SELECT citus_add_local_table_to_metadata('udf_test'); @@ -411,8 +411,8 @@ SELECT citus_add_local_table_to_metadata('udf_test'); (1 row) BEGIN; - SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); - citus_internal_update_none_dist_table_metadata + SELECT citus_internal.update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); + update_none_dist_table_metadata --------------------------------------------------------------------- (1 row) @@ -426,8 +426,8 @@ BEGIN; SELECT placementid AS udf_test_placementid FROM pg_dist_shard_placement WHERE shardid = get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.udf_test') \gset - SELECT pg_catalog.citus_internal_delete_placement_metadata(:udf_test_placementid); - citus_internal_delete_placement_metadata + SELECT citus_internal.delete_placement_metadata(:udf_test_placementid); + delete_placement_metadata --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/create_role_propagation.out b/src/test/regress/expected/create_role_propagation.out index 48310bdc3..4d594ddab 100644 --- a/src/test/regress/expected/create_role_propagation.out +++ b/src/test/regress/expected/create_role_propagation.out @@ -121,17 +121,17 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | 
rolconnlimit | pass_not_empty | rolvaliduntil --------------------------------------------------------------------- - create_group | f | t | f | f | f | f | f | -1 | | infinity - create_group_2 | f | t | f | f | f | f | f | -1 | | infinity - create_role | f | t | f | f | f | f | f | -1 | | infinity - create_role"edge | f | t | f | f | f | f | f | -1 | | infinity - create_role'edge | f | t | f | f | f | f | f | -1 | | infinity - create_role_2 | f | t | f | f | f | f | f | -1 | | infinity - create_role_sysid | f | t | f | f | f | f | f | -1 | | infinity + create_group | f | t | f | f | f | f | f | -1 | | + create_group_2 | f | t | f | f | f | f | f | -1 | | + create_role | f | t | f | f | f | f | f | -1 | | + create_role"edge | f | t | f | f | f | f | f | -1 | | + create_role'edge | f | t | f | f | f | f | f | -1 | | + create_role_2 | f | t | f | f | f | f | f | -1 | | + create_role_sysid | f | t | f | f | f | f | f | -1 | | create_role_with_everything | t | t | t | t | t | t | t | 105 | t | Thu May 04 17:00:00 2045 PDT create_role_with_nothing | f | f | f | f | f | f | f | 3 | t | Mon May 04 17:00:00 2015 PDT - create_user | f | t | f | f | t | f | f | -1 | | infinity - create_user_2 | f | t | f | f | t | f | f | -1 | | infinity + create_user | f | t | f | f | t | f | f | -1 | | + create_user_2 | f | t | f | f | t | f | f | -1 | | (11 rows) SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; @@ -196,6 +196,7 @@ SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::t (1 row) \c - - - :master_port +create role test_admin_role; -- test grants with distributed and non-distributed roles SELECT master_remove_node('localhost', :worker_2_port); master_remove_node @@ -221,29 +222,72 @@ CREATE ROLE non_dist_role_4; NOTICE: not propagating CREATE ROLE/USER commands to other nodes HINT: Connect to other nodes directly to manually create all necessary users and roles. SET citus.enable_create_role_propagation TO ON; +grant dist_role_3,dist_role_1 to test_admin_role with admin option; SET ROLE dist_role_1; GRANT non_dist_role_1 TO non_dist_role_2; SET citus.enable_create_role_propagation TO OFF; +grant dist_role_1 to non_dist_role_1 with admin option; SET ROLE non_dist_role_1; -GRANT dist_role_1 TO dist_role_2; +GRANT dist_role_1 TO dist_role_2 granted by non_dist_role_1; RESET ROLE; SET citus.enable_create_role_propagation TO ON; -GRANT dist_role_3 TO non_dist_role_3; +GRANT dist_role_3 TO non_dist_role_3 granted by test_admin_role; GRANT non_dist_role_4 TO dist_role_4; +GRANT dist_role_3 TO dist_role_4 granted by test_admin_role; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
--------------------------------------------------------------------- 1 (1 row) -SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + ) q; + $$ +); + result +--------------------------------------------------------------------- + [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] +(3 rows) + +REVOKE dist_role_3 from dist_role_4 granted by test_admin_role cascade; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + result +--------------------------------------------------------------------- + [{"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] +(3 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1','test_admin_role')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; role | member | grantor | admin_option --------------------------------------------------------------------- dist_role_1 | dist_role_2 | t | f + dist_role_1 | non_dist_role_1 | t | t + dist_role_1 | test_admin_role | t | t dist_role_3 | non_dist_role_3 | t | f + dist_role_3 | test_admin_role | t | t non_dist_role_1 | non_dist_role_2 | t | f non_dist_role_4 | dist_role_4 | t | f -(4 rows) +(7 rows) SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1; objid @@ -255,6 +299,25 @@ SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid':: non_dist_role_4 (5 rows) +REVOKE dist_role_3 from non_dist_role_3 granted by test_admin_role cascade; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by 
member::regrole::text + ) q; + $$ +); + result +--------------------------------------------------------------------- + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] +(3 rows) + +revoke dist_role_3,dist_role_1 from test_admin_role cascade; +drop role test_admin_role; \c - - - :worker_1_port SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; role | member | grantor | admin_option @@ -276,9 +339,8 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; role | member | grantor | admin_option --------------------------------------------------------------------- - dist_role_1 | dist_role_2 | postgres | f non_dist_role_4 | dist_role_4 | postgres | f -(2 rows) +(1 row) SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; rolname diff --git a/src/test/regress/expected/drop_partitioned_table.out b/src/test/regress/expected/drop_partitioned_table.out index 660adb89c..a92dee711 100644 --- a/src/test/regress/expected/drop_partitioned_table.out +++ b/src/test/regress/expected/drop_partitioned_table.out @@ -354,8 +354,8 @@ NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.pa NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1') NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1') -NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400) -NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400) +NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400) +NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400) ROLLBACK; NOTICE: issuing ROLLBACK NOTICE: issuing ROLLBACK @@ -377,8 +377,8 @@ NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCAD NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1') NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1') NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.child1_xxxxx CASCADE -NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400) -NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400) +NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400) +NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400) ROLLBACK; NOTICE: issuing ROLLBACK NOTICE: issuing ROLLBACK diff --git a/src/test/regress/expected/failure_create_database.out b/src/test/regress/expected/failure_create_database.out new file mode 100644 index 000000000..81fcd4519 --- /dev/null +++ b/src/test/regress/expected/failure_create_database.out @@ -0,0 +1,386 @@ +SET citus.enable_create_database_propagation TO ON; +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +CREATE FUNCTION get_temp_databases_on_nodes() +RETURNS TEXT AS $func$ + SELECT array_agg(DISTINCT result ORDER BY result) AS temp_databases_on_nodes FROM run_command_on_all_nodes($$SELECT datname FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$) WHERE result != ''; +$func$ +LANGUAGE sql; +CREATE FUNCTION count_db_cleanup_records() +RETURNS TABLE(object_name TEXT, count INTEGER) AS $func$ + SELECT object_name, COUNT(*) FROM pg_dist_cleanup WHERE object_name LIKE 'citus_temp_database_%' GROUP BY object_name; +$func$ +LANGUAGE sql; +CREATE FUNCTION ensure_no_temp_databases_on_any_nodes() +RETURNS BOOLEAN AS $func$ + SELECT bool_and(result::boolean) AS no_temp_databases_on_any_nodes FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +$func$ +LANGUAGE sql; +-- cleanup any orphaned resources from previous runs +CALL citus_cleanup_orphaned_resources(); +SET citus.next_operation_id TO 4000; +ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +SELECT citus.mitmproxy('conn.kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^CREATE DATABASE").cancel(' || pg_backend_pid() || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: canceling statement due to user request +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4000_0} +(1 row) + +SELECT * FROM 
count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4000_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^ALTER DATABASE").cancel(' || pg_backend_pid() || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: canceling statement due to user request +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4001_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4001_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + 
ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection not open +CONTEXT: while executing command on localhost:xxxxx +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4002_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4002_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +WARNING: connection not open +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +-- not call citus_cleanup_orphaned_resources() but recover the prepared transactions this time +SELECT 1 FROM recover_prepared_transactions(); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "db1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE db1; +-- after recovering the prepared transactions, cleanup records should also be removed +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT citus_internal.acquire_citus_advisory_object_class_lock").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, 
"stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onParse(query="^WITH distributed_object_data").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection not open +CONTEXT: while executing command on localhost:xxxxx +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4004_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4004_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +CREATE DATABASE db1; +-- show that a successful database creation doesn't leave any pg_dist_cleanup records behind +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +DROP DATABASE db1; +DROP FUNCTION get_temp_databases_on_nodes(); +DROP FUNCTION ensure_no_temp_databases_on_any_nodes(); +DROP FUNCTION count_db_cleanup_records(); +SELECT 1 FROM citus_remove_node('localhost', :master_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index 7b4c04ff8..c2418e9ab 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -132,7 +132,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; -- Check failures on DDL command propagation CREATE TABLE t2 (id int PRIMARY KEY); -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_placement_metadata").kill()'); mitmproxy --------------------------------------------------------------------- @@ -140,7 +140,7 @@ SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadat SELECT create_distributed_table('t2', 'id'); ERROR: connection not open -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_shard_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out b/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out index ec15c1ca3..e71f092c3 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out +++ b/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out @@ -667,7 +667,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_ SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add shard metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -675,7 +675,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_meta SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").kill()'); mitmproxy --------------------------------------------------------------------- @@ -684,7 +684,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_meta SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add placement metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -692,7 +692,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_ SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user 
request -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").kill()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/function_with_case_when.out b/src/test/regress/expected/function_with_case_when.out new file mode 100644 index 000000000..18df5be0a --- /dev/null +++ b/src/test/regress/expected/function_with_case_when.out @@ -0,0 +1,32 @@ +CREATE SCHEMA function_with_case; +SET search_path TO function_with_case; +-- create function +CREATE OR REPLACE FUNCTION test_err(v1 text) + RETURNS text + LANGUAGE plpgsql + SECURITY DEFINER +AS $function$ + +begin + return v1 || ' - ok'; +END; +$function$; +do $$ declare + lNewValues text; + val text; +begin + val = 'test'; + lNewValues = test_err(v1 => case when val::text = 'test'::text then 'yes' else 'no' end); + raise notice 'lNewValues= %', lNewValues; +end;$$ ; +NOTICE: lNewValues= yes - ok +CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE +-- call function +SELECT test_err('test'); + test_err +--------------------------------------------------------------------- + test - ok +(1 row) + +DROP SCHEMA function_with_case CASCADE; +NOTICE: drop cascades to function test_err(text) diff --git a/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out new file mode 100644 index 000000000..594e3b74e --- /dev/null +++ b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out @@ -0,0 +1,471 @@ +-- Public role has connect,temp,temporary privileges on database +-- To test these scenarios, we need to revoke these privileges from public role +-- since public role privileges are inherited by new roles/users +set citus.enable_create_database_propagation to on; +create database test_2pc_db; +show citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +revoke connect,temp,temporary on database test_2pc_db from public; +CREATE SCHEMA grant_on_database_propagation_non_maindb; +SET search_path TO grant_on_database_propagation_non_maindb; +-- test grant/revoke CREATE privilege propagation on database +create user "myuser'_test"; +\c test_2pc_db - - :master_port +grant create on database test_2pc_db to "myuser'_test"; +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke create on database test_2pc_db from "myuser'_test"; +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) +(3 rows) + +drop user "myuser'_test"; +--------------------------------------------------------------------- +-- test grant/revoke CONNECT privilege propagation on database +\c regression - - :master_port +create user myuser2; +\c test_2pc_db - - :master_port +grant CONNECT on database test_2pc_db to myuser2; +\c regression - - :master_port; +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + check_database_privileges 
+--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke connect on database test_2pc_db from myuser2; +\c regression - - :master_port +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) +(3 rows) + +drop user myuser2; +--------------------------------------------------------------------- +-- test grant/revoke TEMP privilege propagation on database +\c regression - - :master_port +create user myuser3; +-- test grant/revoke temp on database +\c test_2pc_db - - :master_port +grant TEMP on database test_2pc_db to myuser3; +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + check_database_privileges +--------------------------------------------------------------------- + (TEMP,t) + (TEMP,t) + (TEMP,t) +(3 rows) + +\c test_2pc_db - - :worker_1_port +revoke TEMP on database test_2pc_db from myuser3; +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + check_database_privileges +--------------------------------------------------------------------- + (TEMP,f) + (TEMP,f) + (TEMP,f) +(3 rows) + +drop user myuser3; +--------------------------------------------------------------------- +\c regression - - :master_port +-- test temporary privilege on database +create user myuser4; +-- test grant/revoke temporary on database +\c test_2pc_db - - :worker_1_port +grant TEMPORARY on database test_2pc_db to myuser4; +\c regression - - :master_port +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke TEMPORARY on database test_2pc_db from myuser4; +\c regression - - :master_port; +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(3 rows) + +drop user myuser4; +--------------------------------------------------------------------- +-- test ALL privileges with ALL statement on database +create user myuser5; +grant ALL on database test_2pc_db to myuser5; +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +revoke ALL on database test_2pc_db from myuser5; +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +drop user myuser5; +--------------------------------------------------------------------- 
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database +create user myuser6; +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6; +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6; +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +drop user myuser6; +--------------------------------------------------------------------- +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option +create user myuser7; +create user myuser_1; +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7; +set role myuser7; +--here since myuser7 does not have grant option, it should fail +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1; +WARNING: no privileges were granted for "test_2pc_db" +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +\c test_2pc_db - - :master_port +RESET ROLE; +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option; +set role myuser7; +--here since myuser have grant option, it should succeed +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7; +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +RESET ROLE; +--below test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict; +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. +--below test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict ; +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. 
+--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade ; +--here we test if myuser7 still have the privileges after revoke grant option for +\c regression - - :master_port +select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +reset role; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1; +\c regression - - :master_port +drop user myuser_1; +drop user myuser7; +--------------------------------------------------------------------- +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database multi database +-- and multi user +\c regression - - :master_port +create user myuser8; +create user myuser_2; +set citus.enable_create_database_propagation to on; +create database test_db; +revoke connect,temp,temporary on database test_db from public; +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2; +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +RESET ROLE; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ; +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2; +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade; +\c regression - - :master_port +select 
check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +\c test_2pc_db - - :master_port +reset role; +\c regression - - :master_port +drop user myuser_2; +drop user myuser8; +set citus.enable_create_database_propagation to on; +drop database test_db; +--------------------------------------------------------------------- +-- rollbacks public role database privileges to original state +grant connect,temp,temporary on database test_2pc_db to public; +drop database test_2pc_db; +set citus.enable_create_database_propagation to off; +DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE; +reset citus.enable_create_database_propagation; +reset search_path; +--------------------------------------------------------------------- diff --git a/src/test/regress/expected/grant_role_from_non_maindb.out b/src/test/regress/expected/grant_role_from_non_maindb.out new file mode 100644 index 000000000..6dc0b6c60 --- /dev/null +++ b/src/test/regress/expected/grant_role_from_non_maindb.out @@ -0,0 +1,160 @@ +CREATE SCHEMA grant_role2pc; +SET search_path TO grant_role2pc; +set citus.enable_create_database_propagation to on; +CREATE DATABASE grant_role2pc_db; +\c grant_role2pc_db +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +SET citus.superuser TO 'postgres'; +CREATE USER grant_role2pc_user1; +CREATE USER grant_role2pc_user2; +CREATE USER grant_role2pc_user3; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +CREATE USER grant_role2pc_user6; +CREATE USER grant_role2pc_user7; +\c grant_role2pc_db +--test with empty superuser +SET citus.superuser TO ''; +grant grant_role2pc_user1 to grant_role2pc_user2; +ERROR: No superuser role is given for Citus main database connection +HINT: Set citus.superuser to a superuser role name +SET citus.superuser TO 'postgres'; +grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER; +\c regression +select result FROM run_command_on_all_nodes( + $$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + 
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text = 'grant_role2pc_user2' + order by member::regrole::text, roleid::regrole::text + ) t + $$ +); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}] +(3 rows) + +\c grant_role2pc_db +--test grant under transactional context with multiple operations +BEGIN; +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3 ; +COMMIT; +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3; +grant grant_role2pc_user1 to grant_role2pc_user6; +ROLLBACK; +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user7; +SELECT 1/0; +ERROR: division by zero +commit; +\c regression +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +\c grant_role2pc_db +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +\c regression +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + 
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +\c grant_role2pc_db +revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3; +--test revoke under transactional context with multiple operations +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3 ; +revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3; +COMMIT; +\c grant_role2pc_db - - :worker_1_port +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +revoke grant_role2pc_user1 from grant_role2pc_user3 cascade; +COMMIT; +\c regression +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + 
[{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +\c grant_role2pc_db - - :worker_1_port +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user6; +COMMIT; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + 
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6; +--clean resources +DROP SCHEMA grant_role2pc; +set citus.enable_create_database_propagation to on; +DROP DATABASE grant_role2pc_db; +drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7; +drop user grant_role2pc_user1; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/isolation_database_cmd_from_any_node.out b/src/test/regress/expected/isolation_database_cmd_from_any_node.out index 771c67fe8..e952bb457 100644 --- a/src/test/regress/expected/isolation_database_cmd_from_any_node.out +++ b/src/test/regress/expected/isolation_database_cmd_from_any_node.out @@ -1,6 +1,11 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock s1-commit s2-commit +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-acquire-citus-adv-oclass-lock: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; @@ -18,8 +23,18 @@ acquire_citus_advisory_object_class_lock (1 row) step s2-commit: COMMIT; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb1 s1-commit s2-commit s1-drop-testdb1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -39,8 +54,18 @@ acquire_citus_advisory_object_class_lock step s2-commit: COMMIT; step s1-drop-testdb1: DROP DATABASE testdb1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s1-drop-testdb1 s2-drop-testdb2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s2-create-testdb2: CREATE DATABASE testdb2; step s1-begin: BEGIN; @@ -61,8 +86,18 @@ step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-drop-testdb1: DROP DATABASE testdb1; step s2-drop-testdb2: DROP DATABASE testdb2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s2-drop-testdb2 +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -81,8 +116,18 @@ acquire_citus_advisory_object_class_lock step s1-commit: COMMIT; step s2-commit: COMMIT; step s2-drop-testdb2: DROP DATABASE testdb2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; @@ -90,8 +135,18 @@ step s1-create-db1: CREATE DATABASE db1; step s2-rollback: ROLLBACK; step s2-drop-testdb2: DROP DATABASE testdb2; step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-user-dbuser s1-grant-on-testdb2-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-user-dbuser +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; @@ -100,8 +155,18 @@ step s1-grant-on-testdb2-to-dbuser: GRANT ALL ON DATABASE testdb2 TO dbuser; step s2-rollback: ROLLBACK; step s2-drop-testdb2: DROP DATABASE testdb2; step s1-drop-user-dbuser: DROP USER dbuser; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-testdb1 s1-create-user-dbuser s1-grant-on-testdb1-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-testdb1 s1-drop-user-dbuser +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; @@ -112,8 +177,18 @@ step s2-rollback: ROLLBACK; step s2-drop-testdb2: DROP DATABASE testdb2; step s1-drop-testdb1: DROP DATABASE testdb1; step s1-drop-user-dbuser: DROP USER dbuser; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-commit s2-rollback s1-drop-db1 s2-drop-testdb2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s2-create-testdb2: CREATE DATABASE testdb2; step s1-begin: BEGIN; @@ -126,8 +201,18 @@ ERROR: database "db1" already exists step s2-rollback: ROLLBACK; step s1-drop-db1: DROP DATABASE db1; step s2-drop-testdb2: DROP DATABASE testdb2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-rollback s2-commit s1-drop-testdb1 s2-drop-db1 +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s2-create-testdb2: CREATE DATABASE testdb2; step s1-begin: BEGIN; @@ -139,8 +224,18 @@ step s2-alter-testdb2-rename-to-db1: <... completed> step s2-commit: COMMIT; step s1-drop-testdb1: DROP DATABASE testdb1; step s2-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-commit s2-rollback s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -151,8 +246,18 @@ step s2-alter-testdb1-rename-to-db1: <... completed> ERROR: database "testdb1" does not exist step s2-rollback: ROLLBACK; step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-rollback s2-commit s2-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -162,8 +267,18 @@ step s1-rollback: ROLLBACK; step s2-alter-testdb1-rename-to-db1: <... completed> step s2-commit: COMMIT; step s2-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; @@ -172,8 +287,18 @@ step s2-rollback: ROLLBACK; step s1-create-db1: <... completed> step s2-drop-testdb2: DROP DATABASE testdb2; step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-commit s2-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; @@ -182,8 +307,18 @@ step s2-commit: COMMIT; step s1-create-db1: <... completed> ERROR: database "db1" already exists step s2-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db2 s1-create-db1 s2-commit s2-drop-db2 s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2; @@ -192,16 +327,36 @@ step s2-commit: COMMIT; step s1-create-db1: <... completed> step s2-drop-db2: DROP DATABASE db2; step s1-drop-db1: DROP DATABASE db1; +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-drop-testdb2 s2-rollback +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; step s1-drop-testdb2: DROP DATABASE testdb2; step s2-rollback: ROLLBACK; step s1-drop-testdb2: <... completed> +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s1-create-db1 s2-begin s2-alter-testdb2-rename-to-db2 s1-drop-db1 s2-commit s2-drop-db2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s1-create-db1: CREATE DATABASE db1; step s2-begin: BEGIN; @@ -209,3 +364,8 @@ step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2; step s1-drop-db1: DROP DATABASE db1; step s2-commit: COMMIT; step s2-drop-db2: DROP DATABASE db2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index e37724e4b..1aa7cbcc1 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -138,7 +138,7 @@ step s2-view-worker: ('%pg_prepared_xacts%'), ('%COMMIT%'), ('%dump_local_%'), - ('%citus_internal_local_blocked_processes%'), + ('%citus_internal.local_blocked_processes%'), ('%add_node%'), ('%csa_from_one_node%'), ('%pg_locks%')) diff --git a/src/test/regress/expected/issue_7477.out b/src/test/regress/expected/issue_7477.out new file mode 100644 index 000000000..224d85c6e --- /dev/null +++ b/src/test/regress/expected/issue_7477.out @@ -0,0 +1,62 @@ +--- Test for updating a table that has a foreign key reference to another reference table. +--- Issue #7477: Distributed deadlock after issuing a simple UPDATE statement +--- https://github.com/citusdata/citus/issues/7477 +CREATE TABLE table1 (id INT PRIMARY KEY); +SELECT create_reference_table('table1'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO table1 VALUES (1); +CREATE TABLE table2 ( + id INT, + info TEXT, + CONSTRAINT table1_id_fk FOREIGN KEY (id) REFERENCES table1 (id) + ); +SELECT create_reference_table('table2'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO table2 VALUES (1, 'test'); +--- Runs the update command in parallel on workers. 
+--- Due to bug #7477, before the fix, the result is non-deterministic +--- and have several rows of the form: +--- localhost | 57638 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: canceling the transaction since it was involved in a distributed deadlock +SELECT * FROM master_run_on_worker( + ARRAY['localhost', 'localhost','localhost', 'localhost','localhost', + 'localhost','localhost', 'localhost','localhost', 'localhost']::text[], + ARRAY[57638, 57637, 57637, 57638, 57637, 57638, 57637, 57638, 57638, 57637]::int[], + ARRAY['UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1' + ]::text[], + true); + node_name | node_port | success | result +--------------------------------------------------------------------- + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 +(10 rows) + +--- cleanup +DROP TABLE table2; +DROP TABLE table1; diff --git a/src/test/regress/expected/issue_7705.out b/src/test/regress/expected/issue_7705.out new file mode 100644 index 000000000..20b078226 --- /dev/null +++ b/src/test/regress/expected/issue_7705.out @@ -0,0 +1,248 @@ +--- Test for verifying that column references (var nodes) in targets that cannot be pushed down +--- do not cause issues for the postgres planner, in particular postgres versions 16+, where the +--- varnullingrels field of a VAR node may contain relids of join relations that can make the var +--- NULL; in a rewritten distributed query without a join such relids do not have a meaning. +--- Issue #7705: [SEGFAULT] Querying distributed tables with window partition causes segmentation fault +--- https://github.com/citusdata/citus/issues/7705 +CREATE SCHEMA issue_7705; +SET search_path to 'issue_7705'; +SET citus.next_shard_id TO 30070000; +SET citus.shard_replication_factor TO 1; +SET citus.enable_local_execution TO ON; +CREATE TABLE t1 (id INT PRIMARY KEY); +INSERT INTO t1 VALUES (1), (2); +CREATE TABLE t2 (id INT, account_id INT, a2 INT, PRIMARY KEY(id, account_id)); +INSERT INTO t2 VALUES (3, 1, 10), (4, 2, 20), (5, 1, NULL); +SELECT create_distributed_table('t1', 'id'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$issue_7705.t1$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('t2', 'account_id'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. 
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$issue_7705.t2$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Test the issue seen in #7705; a target expression with +-- a window function that cannot be pushed down because the +-- partion by is not on the distribution column also includes +-- a column from the inner side of a left outer join, which +-- produces a non-empty varnullingrels set in PG 16 (and higher) +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + id | max +--------------------------------------------------------------------- + 1 | 10 + 2 | 20 + 1 | +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + QUERY PLAN +--------------------------------------------------------------------- + WindowAgg + Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3 + -> Sort + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Sort Key: remote_scan.worker_column_3 + -> Custom Scan (Citus Adaptive) + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2, t2.id + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(22 rows) + +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id; + id | max +--------------------------------------------------------------------- + 1 | 10 + 2 | 20 + 1 | +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id; + QUERY PLAN +--------------------------------------------------------------------- + WindowAgg + Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3 + -> Sort + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Sort Key: remote_scan.worker_column_3 + -> Custom Scan (Citus Adaptive) + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (issue_7705.t2_30070004 t2 RIGHT JOIN issue_7705.t1_30070000 t1 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2, t2.id + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(22 rows) + +SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 
LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + id | max +--------------------------------------------------------------------- + 1 | + 1 | 10 + 2 | 20 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Output: remote_scan.id, (max(remote_scan.max) OVER (?)), remote_scan.worker_column_3 + Group Key: remote_scan.id, max(remote_scan.max) OVER (?) + -> WindowAgg + Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3 + -> Sort + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Sort Key: remote_scan.worker_column_3 + -> Custom Scan (Citus Adaptive) + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2, t2.id + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(25 rows) + +CREATE SEQUENCE test_seq START 101; +CREATE OR REPLACE FUNCTION TEST_F(int) returns INT language sql stable as $$ select $1 + 42; $$ ; +-- Issue #7705 also occurs if a target expression includes a column +-- of a distributed table that is on the inner side of a left outer +-- join and a call to nextval(), because nextval() cannot be pushed +-- down, and must be run on the coordinator +SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + id | test_f +--------------------------------------------------------------------- + 1 | 153 + 1 | + 2 | 165 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + QUERY PLAN +--------------------------------------------------------------------- + Result + Output: remote_scan.id, ((remote_scan.test_f + (nextval('test_seq'::regclass))::integer) + 42) + -> Sort + Output: remote_scan.id, remote_scan.test_f + Sort Key: remote_scan.id + -> Custom Scan (Citus Adaptive) + Output: remote_scan.id, remote_scan.test_f + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS test_f FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2 + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(22 rows) + +SELECT t1.id, CASE nextval('test_seq') % 2 = 0 WHEN true THEN t2.a2 ELSE 1 END +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + id | 
case +--------------------------------------------------------------------- + 1 | 10 + 1 | 1 + 2 | 20 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, CASE nextval('test_seq') %2 = 0 WHEN true THEN t2.a2 ELSE 1 END +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + QUERY PLAN +--------------------------------------------------------------------- + Result + Output: remote_scan.id, CASE ((nextval('test_seq'::regclass) % '2'::bigint) = 0) WHEN CASE_TEST_EXPR THEN remote_scan."case" ELSE 1 END + -> Sort + Output: remote_scan.id, remote_scan."case" + Sort Key: remote_scan.id + -> Custom Scan (Citus Adaptive) + Output: remote_scan.id, remote_scan."case" + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS "case" FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2 + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(22 rows) + +--- cleanup +\set VERBOSITY TERSE +DROP SCHEMA issue_7705 CASCADE; +NOTICE: drop cascades to 4 other objects +RESET all; diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out index a73467e81..5056ba543 100644 --- a/src/test/regress/expected/merge.out +++ b/src/test/regress/expected/merge.out @@ -1128,7 +1128,7 @@ DO NOTHING WHEN NOT MATCHED THEN INSERT VALUES(rs_source.id); DEBUG: Creating MERGE router plan -DEBUG: +DEBUG: RESET client_min_messages; SELECT * INTO rs_local FROM rs_target ORDER BY 1 ; -- Should be equal @@ -1259,7 +1259,7 @@ DO NOTHING WHEN NOT MATCHED THEN INSERT VALUES(fn_source.id, fn_source.source); DEBUG: Creating MERGE router plan -DEBUG: +DEBUG: RESET client_min_messages; SELECT * INTO fn_local FROM fn_target ORDER BY 1 ; -- Should be equal @@ -1552,7 +1552,7 @@ BEGIN; SET citus.log_remote_commands to true; SET client_min_messages TO DEBUG1; EXECUTE merge_prepare(2); -DEBUG: +DEBUG: DEBUG: Creating MERGE router plan NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -1782,13 +1782,13 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) +NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t 
USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) +NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) +NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) +NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SET citus.log_remote_commands to false; SELECT compare_tables(); @@ -1842,6 +1842,297 @@ SELECT compare_tables(); (1 row) ROLLBACK; +-- let's create source and target table +ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 13000; +CREATE TABLE source_pushdowntest (id integer); +CREATE TABLE target_pushdowntest (id integer ); +-- let's distribute both table on id field +SELECT create_distributed_table('source_pushdowntest', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target_pushdowntest', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- we are doing this operation on single node setup let's figure out colocation id of both tables +-- both has same colocation id so both are colocated. +WITH colocations AS ( + SELECT colocationid + FROM pg_dist_partition + WHERE logicalrelid = 'source_pushdowntest'::regclass + OR logicalrelid = 'target_pushdowntest'::regclass +) +SELECT + CASE + WHEN COUNT(DISTINCT colocationid) = 1 THEN 'Same' + ELSE 'Different' + END AS colocation_status +FROM colocations; + colocation_status +--------------------------------------------------------------------- + Same +(1 row) + +SET client_min_messages TO DEBUG1; +-- Test 1 : tables are colocated AND query is multisharded AND Join On distributed column : should push down to workers. 
+EXPLAIN (costs off, timing off, summary off) +MERGE INTO target_pushdowntest t +USING source_pushdowntest s +ON t.id = s.id +WHEN NOT MATCHED THEN + INSERT (id) + VALUES (s.id); +DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: Creating MERGE router plan + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_pushdowntest_4000068 t + -> Merge Left Join + Merge Cond: (s.id = t.id) + -> Sort + Sort Key: s.id + -> Seq Scan on source_pushdowntest_4000064 s + -> Sort + Sort Key: t.id + -> Seq Scan on target_pushdowntest_4000068 t + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_pushdowntest_4000069 t + -> Merge Left Join + Merge Cond: (s.id = t.id) + -> Sort + Sort Key: s.id + -> Seq Scan on source_pushdowntest_4000065 s + -> Sort + Sort Key: t.id + -> Seq Scan on target_pushdowntest_4000069 t + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_pushdowntest_4000070 t + -> Merge Left Join + Merge Cond: (s.id = t.id) + -> Sort + Sort Key: s.id + -> Seq Scan on source_pushdowntest_4000066 s + -> Sort + Sort Key: t.id + -> Seq Scan on target_pushdowntest_4000070 t + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_pushdowntest_4000071 t + -> Merge Left Join + Merge Cond: (s.id = t.id) + -> Sort + Sort Key: s.id + -> Seq Scan on source_pushdowntest_4000067 s + -> Sort + Sort Key: t.id + -> Seq Scan on target_pushdowntest_4000071 t +(47 rows) + +-- Test 2 : tables are colocated AND source query is not multisharded : should push down to worker. +-- DEBUG LOGS show that query is getting pushed down +MERGE INTO target_pushdowntest t +USING (SELECT * from source_pushdowntest where id = 1) s +on t.id = s.id +WHEN NOT MATCHED THEN + INSERT (id) + VALUES (s.id); +DEBUG: +DEBUG: Creating MERGE router plan +-- Test 3 : tables are colocated source query is single sharded but not using source distributed column in insertion. let's not pushdown. +INSERT INTO source_pushdowntest (id) VALUES (3); +EXPLAIN (costs off, timing off, summary off) +MERGE INTO target_pushdowntest t +USING (SELECT 1 as somekey, id from source_pushdowntest where id = 1) s +on t.id = s.somekey +WHEN NOT MATCHED THEN + INSERT (id) + VALUES (s.somekey); +DEBUG: MERGE INSERT must use the source table distribution column value for push down to workers. Otherwise, repartitioning will be applied +DEBUG: MERGE INSERT must use the source table distribution column value for push down to workers. Otherwise, repartitioning will be applied +DEBUG: Creating MERGE repartition plan +DEBUG: Using column - index:0 from the source list to redistribute + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) + MERGE INTO target_pushdowntest method: pull to coordinator + -> Custom Scan (Citus Adaptive) + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_pushdowntest_4000064 source_pushdowntest + Filter: (id = 1) +(9 rows) + +-- let's verify if we use some other column from source for value of distributed column in target. +-- it should be inserted to correct shard of target. 
+CREATE TABLE source_withdata (id integer, some_number integer); +CREATE TABLE target_table (id integer, name text); +SELECT create_distributed_table('source_withdata', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source_withdata (id, some_number) VALUES (1, 3); +-- we will use some_number column from source_withdata to insert into distributed column of target. +-- value of some_number is 3 let's verify what shard it should go to. +select worker_hash(3); + worker_hash +--------------------------------------------------------------------- + -28094569 +(1 row) + +-- it should go to second shard of target as target has 4 shard and hash "-28094569" comes in range of second shard. +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN NOT MATCHED THEN + INSERT (id, name) + VALUES (s.some_number, 'parag'); +DEBUG: Sub-query is not pushable, try repartitioning +DEBUG: MERGE command is only supported when all distributed tables are co-located and joined on their distribution columns +DEBUG: Creating MERGE repartition plan +DEBUG: Using column - index:1 from the source list to redistribute +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: Execute MERGE task list +-- let's verify if data inserted to second shard of target. +EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM target_table; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus Adaptive) (actual rows=1 loops=1) + Task Count: 4 + Tuple data received from nodes: 9 bytes + Tasks Shown: All + -> Task + Tuple data received from node: 0 bytes + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on target_table_4000076 target_table (actual rows=0 loops=1) + -> Task + Tuple data received from node: 9 bytes + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on target_table_4000077 target_table (actual rows=1 loops=1) + -> Task + Tuple data received from node: 0 bytes + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on target_table_4000078 target_table (actual rows=0 loops=1) + -> Task + Tuple data received from node: 0 bytes + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on target_table_4000079 target_table (actual rows=0 loops=1) +(20 rows) + +-- let's verify target data too. 
+SELECT * FROM target_table; + id | name +--------------------------------------------------------------------- + 3 | parag +(1 row) + +-- test UPDATE : when source is single sharded and table are colocated +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + UPDATE SET name = 'parag jain'; +DEBUG: Sub-query is not pushable, try repartitioning +DEBUG: MERGE command is only supported when all distributed tables are co-located and joined on their distribution columns +DEBUG: Creating MERGE repartition plan +DEBUG: Using column - index:1 from the source list to redistribute +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: Execute MERGE task list +-- let's verify if data updated properly. +SELECT * FROM target_table; + id | name +--------------------------------------------------------------------- + 3 | parag jain +(1 row) + +-- let's see what happend when we try to update distributed key of target table +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + UPDATE SET id = 1500; +ERROR: updating the distribution column is not allowed in MERGE actions +SELECT * FROM target_table; + id | name +--------------------------------------------------------------------- + 3 | parag jain +(1 row) + +-- test DELETE : when source is single sharded and table are colocated +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + DELETE; +DEBUG: Sub-query is not pushable, try repartitioning +DEBUG: MERGE command is only supported when all distributed tables are co-located and joined on their distribution columns +DEBUG: Creating MERGE repartition plan +DEBUG: Using column - index:1 from the source list to redistribute +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: Execute MERGE task list +-- let's verify if data deleted properly. 
+SELECT * FROM target_table; + id | name +--------------------------------------------------------------------- +(0 rows) + +-- +DELETE FROM source_withdata; +DELETE FROM target_table; +INSERT INTO source VALUES (1,1); +merge into target_table sda +using source_withdata sdn +on sda.id = sdn.id AND sda.id = 1 +when not matched then + insert (id) + values (10000); +ERROR: MERGE INSERT is using unsupported expression type for distribution column +DETAIL: Inserting arbitrary values that don't correspond to the joined column values can lead to unpredictable outcomes where rows are incorrectly distributed among different shards +SELECT * FROM target_table WHERE id = 10000; + id | name +--------------------------------------------------------------------- +(0 rows) + +RESET client_min_messages; -- This will prune shards with restriction information as NOT MATCHED is void BEGIN; SET citus.log_remote_commands to true; @@ -2898,14 +3189,14 @@ WHEN NOT MATCHED THEN -> Limit -> Sort Sort Key: id2 - -> Seq Scan on demo_source_table_4000135 demo_source_table + -> Seq Scan on demo_source_table_4000151 demo_source_table -> Distributed Subplan XXX_2 -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on demo_source_table_4000135 demo_source_table + -> Seq Scan on demo_source_table_4000151 demo_source_table Task Count: 1 Tasks Shown: All -> Task @@ -3119,10 +3410,10 @@ DEBUG: Creating MERGE repartition plan DEBUG: Using column - index:0 from the source list to redistribute DEBUG: Collect source query results on coordinator DEBUG: Create a MERGE task list that needs to be routed -DEBUG: -DEBUG: -DEBUG: -DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: DEBUG: Execute MERGE task list RESET client_min_messages; SELECT * FROM target_6785 ORDER BY 1; @@ -3240,7 +3531,7 @@ USING s1 s ON t.id = s.id WHEN NOT MATCHED THEN INSERT (id) VALUES(s.val); -ERROR: MERGE INSERT must use the source table distribution column value +ERROR: MERGE INSERT must use the source's joining column for target's distribution column MERGE INTO t1 t USING s1 s ON t.id = s.id @@ -3966,7 +4257,7 @@ CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_ PL/pgSQL function citus_drop_trigger() line XX at PERFORM DROP FUNCTION merge_when_and_write(); DROP SCHEMA merge_schema CASCADE; -NOTICE: drop cascades to 103 other objects +NOTICE: drop cascades to 107 other objects DETAIL: drop cascades to function insert_data() drop cascades to table local_local drop cascades to table target @@ -4026,6 +4317,10 @@ drop cascades to table pg_source drop cascades to table citus_target drop cascades to table citus_source drop cascades to function compare_tables() +drop cascades to table source_pushdowntest +drop cascades to table target_pushdowntest +drop cascades to table source_withdata +drop cascades to table target_table drop cascades to view pg_source_view drop cascades to view citus_source_view drop cascades to table pg_pa_target @@ -4042,7 +4337,7 @@ drop cascades to table target_set drop cascades to table source_set drop cascades to table refsource_ref drop cascades to table pg_result -drop cascades to table refsource_ref_4000112 +drop cascades to table refsource_ref_4000128 drop cascades to table pg_ref drop cascades to table local_ref drop cascades to table reftarget_local @@ -4060,11 +4355,7 @@ drop cascades to table source_6785 drop cascades to table target_6785 drop cascades to function add_s(integer,integer) drop cascades to table pg -drop 
cascades to table t1_4000174 -drop cascades to table s1_4000175 +drop cascades to table t1_4000190 +drop cascades to table s1_4000191 drop cascades to table t1 -drop cascades to table s1 -drop cascades to table dist_target -drop cascades to table dist_source -drop cascades to view show_tables -and 3 other objects (see server log for list) +and 7 other objects (see server log for list) diff --git a/src/test/regress/expected/merge_schema_sharding.out b/src/test/regress/expected/merge_schema_sharding.out index 8a9ba89dd..17f6f6adb 100644 --- a/src/test/regress/expected/merge_schema_sharding.out +++ b/src/test/regress/expected/merge_schema_sharding.out @@ -98,14 +98,26 @@ WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b; DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c2_t1 ON (citus_table_alias.a OPERATOR(pg_catalog.=) nullkey_c2_t1.a) WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b +DEBUG: Execute MERGE task list MERGE INTO schema_shard_table1.nullkey_c1_t1 USING nullkey_c2_t1 ON (schema_shard_table1.nullkey_c1_t1.a = nullkey_c2_t1.a) WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c2_t1.a, nullkey_c2_t1.b); DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c2_t1 ON (citus_table_alias.a OPERATOR(pg_catalog.=) nullkey_c2_t1.a) WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c2_t1.a, nullkey_c2_t1.b) +DEBUG: Execute MERGE task list -- with a distributed table SET search_path TO schema_shard_table1; MERGE INTO nullkey_c1_t1 USING schema_shard_table.distributed_table ON (nullkey_c1_t1.a = schema_shard_table.distributed_table.a) @@ -114,7 +126,12 @@ WHEN NOT MATCHED THEN INSERT VALUES (schema_shard_table.distributed_table.a, sch DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Router planner cannot 
handle multi-shard select queries +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) distributed_table ON (citus_table_alias.a OPERATOR(pg_catalog.=) distributed_table.a) WHEN MATCHED THEN UPDATE SET b = distributed_table.b WHEN NOT MATCHED THEN INSERT (a, b) VALUES (distributed_table.a, distributed_table.b) +DEBUG: Execute MERGE task list MERGE INTO schema_shard_table.distributed_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.distributed_table.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b); @@ -163,7 +180,13 @@ WHEN MATCHED THEN UPDATE SET b = schema_shard_table.reference_table.b; DEBUG: A mix of distributed and reference table, try repartitioning DEBUG: A mix of distributed and reference table, routable query is not possible DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) reference_table ON (citus_table_alias.a OPERATOR(pg_catalog.=) reference_table.a) WHEN MATCHED THEN UPDATE SET b = reference_table.b +DEBUG: Execute MERGE task list MERGE INTO schema_shard_table.reference_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.reference_table.a) WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b); @@ -174,7 +197,13 @@ WHEN MATCHED THEN UPDATE SET b = schema_shard_table.citus_local_table.b; DEBUG: A mix of distributed and local table, try repartitioning DEBUG: A mix of distributed and citus-local table, routable query is not possible DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) citus_local_table ON (citus_table_alias.a OPERATOR(pg_catalog.=) citus_local_table.a) WHEN MATCHED THEN UPDATE SET b = citus_local_table.b +DEBUG: Execute MERGE task list MERGE INTO schema_shard_table.citus_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.citus_local_table.a) WHEN MATCHED THEN DELETE; DEBUG: A mix of distributed and local table, try repartitioning @@ -210,7 +239,12 @@ WHEN MATCHED THEN UPDATE 
SET b = cte.b; DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte ON (citus_table_alias.a OPERATOR(pg_catalog.=) cte.a) WHEN MATCHED THEN UPDATE SET b = cte.b +DEBUG: Execute MERGE task list WITH cte AS materialized ( SELECT * FROM schema_shard_table.distributed_table ) @@ -219,7 +253,12 @@ WHEN MATCHED THEN UPDATE SET b = cte.b; DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte ON (citus_table_alias.a OPERATOR(pg_catalog.=) cte.a) WHEN MATCHED THEN UPDATE SET b = cte.b +DEBUG: Execute MERGE task list SET client_min_messages TO WARNING; DROP SCHEMA schema_shard_table1 CASCADE; DROP SCHEMA schema_shard_table2 CASCADE; diff --git a/src/test/regress/expected/merge_vcore.out b/src/test/regress/expected/merge_vcore.out new file mode 100644 index 000000000..0eccb811b --- /dev/null +++ b/src/test/regress/expected/merge_vcore.out @@ -0,0 +1,587 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q +\endif +-- MERGE command performs a join from data_source to target_table_name +DROP SCHEMA IF EXISTS merge_vcore_schema CASCADE; +NOTICE: schema "merge_vcore_schema" does not exist, skipping +--MERGE INTO target +--USING source +--WHEN NOT MATCHED +--WHEN MATCHED AND +--WHEN MATCHED +CREATE SCHEMA merge_vcore_schema; +SET search_path TO merge_vcore_schema; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 4000000; +SET citus.explain_all_tasks TO true; +SET citus.shard_replication_factor TO 1; +SET citus.max_adaptive_executor_pool_size TO 1; +SET client_min_messages = warning; +SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +-- ****************************************** CASE 1 : Both are singleSharded*************************************** +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', null, colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', null, colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) 
+ MERGE INTO target method: pull to coordinator + -> Custom Scan (Citus Adaptive) + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000000 source +(8 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 2 : source is single sharded and target is distributed ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', null, colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) 
+ MERGE INTO target method: pull to coordinator + -> Custom Scan (Citus Adaptive) + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000002 source +(8 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 3 : source is distributed and target is single sharded ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', null); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) 
+ MERGE INTO target method: pull to coordinator + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000007 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000008 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000009 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000010 source +(17 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 4 : both are distributed ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) 
+ MERGE INTO target method: repartition + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000012 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000013 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000014 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000015 source +(17 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 5 : both are distributed & colocated ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id', colocate_with=>'source'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) 
+ MERGE INTO target method: repartition + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000020 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000021 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000022 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000023 source +(17 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 6 : both are singlesharded & colocated ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', null); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', null, colocate_with=>'source'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus Adaptive) + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_4000029 target + -> Nested Loop + -> Seq Scan on source_4000028 source + -> Materialize + -> Seq Scan on target_4000029 target + Filter: ('2'::bigint = id) +(11 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- Bug Fix Test as part of this PR +-- Test 1 +CREATE TABLE source ( + id int, + age int, + salary int +); +CREATE TABLE target ( + id int, + age int, + salary int +); +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (id, salary) VALUES (source.id, source.salary); +SELECT * FROM TARGET; + id | age | salary +--------------------------------------------------------------------- + 1 | | 100000 +(1 row) + 
+DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- Test 2 +CREATE TABLE source ( + id int, + age int, + salary int +); +CREATE TABLE target ( + id int, + age int, + salary int +); +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (salary, id) VALUES (source.salary, source.id); +SELECT * FROM TARGET; + id | age | salary +--------------------------------------------------------------------- + 1 | | 100000 +(1 row) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- Test 3 +CREATE TABLE source ( + id int, + age int, + salary int +); +CREATE TABLE target ( + id int, + age int, + salary int +); +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (salary, id, age) VALUES (source.age, source.id, source.salary); +SELECT * FROM TARGET; + id | age | salary +--------------------------------------------------------------------- + 1 | 100000 | 30 +(1 row) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +DROP SCHEMA IF EXISTS merge_vcore_schema CASCADE; diff --git a/src/test/regress/expected/merge_vcore_0.out b/src/test/regress/expected/merge_vcore_0.out new file mode 100644 index 000000000..a7e3fbf20 --- /dev/null +++ b/src/test/regress/expected/merge_vcore_0.out @@ -0,0 +1,6 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q diff --git a/src/test/regress/expected/metadata_sync_from_non_maindb.out b/src/test/regress/expected/metadata_sync_from_non_maindb.out new file mode 100644 index 000000000..2aac507bd --- /dev/null +++ b/src/test/regress/expected/metadata_sync_from_non_maindb.out @@ -0,0 +1,311 @@ +CREATE SCHEMA metadata_sync_2pc_schema; +SET search_path TO metadata_sync_2pc_schema; +set citus.enable_create_database_propagation to on; +CREATE DATABASE metadata_sync_2pc_db; +revoke connect,temp,temporary on database metadata_sync_2pc_db from public; +\c metadata_sync_2pc_db +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +CREATE USER "grant_role2pc'_user1"; +CREATE USER "grant_role2pc'_user2"; +CREATE USER "grant_role2pc'_user3"; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +\c regression +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db +grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; +-- This section was originally testing a scenario where a user with the 'admin option' grants the same role to another user, also with the 'admin option'. +-- However, we encountered inconsistent errors because the 'admin option' grant is executed after the grant below. +-- Once we establish the correct order of granting, we will reintroduce the 'granted by' clause. +-- For now, we are commenting out the grant below that includes 'granted by', and instead, we are adding a grant without the 'granted by' clause. +-- grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5; +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; +\c regression +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) +(8 rows) + +-- test for security label on role +\c metadata_sync_2pc_db - - :master_port +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE grant_role2pc_user4 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "grant_role2pc'_user1" IS 'citus_classified'; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +set citus.enable_create_database_propagation to on; +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] +(3 rows) + +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +SELECT node_type, result FROM 
get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c metadata_sync_2pc_db +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; +revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; +\c regression +drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; +--test for user operations +--test for create user +\c regression - - :master_port +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db - - :master_port +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; +\c metadata_sync_2pc_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; +create role test_role3; +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] +(3 rows) + +--test for alter user +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; +\c metadata_sync_2pc_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] +(3 rows) + +--test for drop user +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db - - :worker_1_port +DROP ROLE test_role1, "test_role2-needs\!escape"; +\c metadata_sync_2pc_db - - :master_port +DROP ROLE test_role3; +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] +(3 rows) + +-- Clean up: drop the database on worker node 2 +\c regression - - :worker_2_port +DROP ROLE if exists test_role1, "test_role2-needs\!escape", test_role3; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + +(3 rows) + +set citus.enable_create_database_propagation to on; +drop database metadata_sync_2pc_db; +drop schema metadata_sync_2pc_schema; +reset citus.enable_create_database_propagation; +reset search_path; diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index c1cb92d9b..9db68eaf5 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -14,7 +14,7 @@ CREATE TABLE test(col_1 int); -- not in a distributed transaction SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction -SELECT citus_internal_update_relation_colocation ('test'::regclass, 1); +SELECT citus_internal.update_relation_colocation ('test'::regclass, 1); ERROR: This is an internal Citus function can only be used in a distributed transaction -- in a distributed transaction, but the application name is not Citus BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -73,7 +73,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); + SELECT citus_internal.update_relation_colocation ('test'::regclass, 10); ERROR: must be owner of table test ROLLBACK; -- finally, a user can only add its own tables to the metadata @@ -349,7 +349,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 
'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); + SELECT citus_internal.update_placement_metadata(1420007, 10000, 11111); ERROR: could not find valid entry for shard xxxxx ROLLBACK; -- non-existing users should fail to pass the checks @@ -470,7 +470,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: must be owner of table super_user_table ROLLBACK; -- the user is only allowed to add a shard for add a table which is in pg_dist_partition @@ -485,7 +485,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition. ROLLBACK; -- ok, now add the table to the pg_dist_partition @@ -525,8 +525,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); - citus_internal_update_relation_colocation + SELECT citus_internal.update_relation_colocation ('test_2'::regclass, 1231231232); + update_relation_colocation --------------------------------------------------------------------- (1 row) @@ -544,7 +544,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Invalid shard id: -1 ROLLBACK; -- invalid storage types are not allowed @@ -559,7 +559,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Invalid shard storage type: X ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables @@ -574,7 +574,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES 
('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shards of has distributed table "test_2" cannot have NULL shard ranges ROLLBACK; -- non-integer shard ranges are not allowed @@ -589,7 +589,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: invalid input syntax for type integer: "non-int" ROLLBACK; -- shardMinValue should be smaller than shardMaxValue @@ -604,7 +604,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: shardMinValue=-1610612737 is greater than shardMaxValue=-2147483648 for table "test_2", which is not allowed ROLLBACK; -- we do not allow overlapping shards for the same table @@ -621,7 +621,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text), ('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shard intervals overlap for table "test_2": 1420001 and 1420000 ROLLBACK; -- Now let's check valid pg_dist_object updates @@ -780,7 +780,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Metadata syncing is only allowed for hash, reference and local tables: X ROLLBACK; -- we do not allow NULL shardMinMax values @@ -797,8 +797,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, 
shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- (1 row) @@ -807,7 +807,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 1420000; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shards of has distributed table "test_2" cannot have NULL shard ranges ROLLBACK; \c - metadata_sync_helper_role - :worker_1_port @@ -830,8 +830,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_2'::regclass, 1420004::bigint, 't'::"char", '51'::text, '60'::text), ('test_2'::regclass, 1420005::bigint, 't'::"char", '61'::text, '70'::text), ('test_3'::regclass, 1420008::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- @@ -852,7 +852,7 @@ BEGIN; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; -- now, add few more shards for test_3 to make it colocated with test_2 @@ -871,8 +871,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_3'::regclass, 1420011::bigint, 't'::"char", '41'::text, '50'::text), ('test_3'::regclass, 1420012::bigint, 't'::"char", '51'::text, '60'::text), ('test_3'::regclass, 1420013::bigint, 't'::"char", '61'::text, '70'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- @@ -894,7 +894,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shards of reference or local table "test_ref" should have NULL shard ranges ROLLBACK; -- reference tables cannot have multiple shards @@ -910,7 +910,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS 
(VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), ('test_ref'::regclass, 1420007::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: relation "test_ref" has already at least one shard, adding more is not allowed ROLLBACK; -- finally, add a shard for reference tables @@ -925,8 +925,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- (1 row) @@ -946,8 +946,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- (1 row) @@ -966,9 +966,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS - (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) - SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS + (VALUES (-10, 0::bigint, 1::int, 1500000::bigint)) + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: could not find valid entry for shard xxxxx ROLLBACK; -- invalid placementid @@ -983,7 +983,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, 1::int, -10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: Shard placement has invalid placement id (-10) for shard(1420000) ROLLBACK; -- non-existing shard @@ -998,7 +998,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1430100, 0::bigint, 1::int, 10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: could not find valid entry for shard 
xxxxx ROLLBACK; -- non-existing node with non-existing node-id 123123123 @@ -1013,7 +1013,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES ( 1420000, 0::bigint, 123123123::int, 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: Node with group id 123123123 for shard placement xxxxx does not exist ROLLBACK; -- create a volatile function that returns the local node id @@ -1044,7 +1044,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, get_node_id(), 1500000), (1420000, 0::bigint, get_node_id(), 1500001)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: duplicate key value violates unique constraint "placement_shardid_groupid_unique_index" ROLLBACK; -- shard is not owned by us @@ -1059,7 +1059,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420007, 0::bigint, get_node_id(), 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: must be owner of table super_user_table ROLLBACK; -- sucessfully add placements @@ -1085,8 +1085,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1420011, 0::bigint, get_node_id(), 1500009), (1420012, 0::bigint, get_node_id(), 1500010), (1420013, 0::bigint, get_node_id(), 1500011)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - citus_internal_add_placement_metadata + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + add_placement_metadata --------------------------------------------------------------------- @@ -1112,8 +1112,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); - citus_internal_update_relation_colocation + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); + update_relation_colocation --------------------------------------------------------------------- (1 row) @@ -1130,7 +1130,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); ERROR: Node with group id 1014 for shard placement xxxxx does not exist COMMIT; -- fails because the source node doesn't contain the shard @@ -1143,7 +1143,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); + SELECT 
citus_internal.update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); ERROR: Active placement for shard xxxxx is not found on group:14 COMMIT; -- fails because shard does not exist @@ -1156,7 +1156,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(0, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 0 COMMIT; -- fails because none-existing shard @@ -1169,7 +1169,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 213123123123 COMMIT; -- fails because we do not own the shard @@ -1182,7 +1182,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(1420007, get_node_id(), get_node_id()+1); ERROR: must be owner of table super_user_table COMMIT; -- the user only allowed to delete their own shards @@ -1197,7 +1197,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ERROR: must be owner of table super_user_table ROLLBACK; -- the user cannot delete non-existing shards @@ -1212,7 +1212,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ERROR: Shard id does not exists: 1420100 ROLLBACK; -- sucessfully delete shards @@ -1239,8 +1239,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; - citus_internal_delete_shard_metadata + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; + delete_shard_metadata --------------------------------------------------------------------- (1 row) @@ -1274,7 +1274,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1298,7 +1298,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' WHERE logicalrelid = 'test_2'::regclass; \endif - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 
251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1313,7 +1313,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition. ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1328,7 +1328,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: Updating colocation ids are only allowed for hash and single shard distributed tables: a ROLLBACK; -- colocated hash distributed table should have the same dist key columns diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index ebaebdbd1..aaafce715 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1420,18 +1420,39 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 12.2-1 ALTER EXTENSION citus UPDATE TO '12.2-1'; SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object + previous_object | current_object --------------------------------------------------------------------- - | function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) void - | function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) void - | function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) void - | function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") void - | function citus_internal.commit_management_command_2pc() void - | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void - | function citus_internal.mark_object_distributed(oid,text,oid,text) void - | function citus_internal.start_management_transaction(xid8) void - | function citus_internal_database_command(text) void -(9 rows) + function citus_unmark_object_distributed(oid,oid,integer) void | + | function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) void + | function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) void + | function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) void + | function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") void + | function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) void + | function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) void + | function citus_internal.add_tenant_schema(oid,integer) void + | function citus_internal.adjust_local_clock_to_remote(cluster_clock) void + | function citus_internal.commit_management_command_2pc() void + | function citus_internal.database_command(text) void + | function citus_internal.delete_colocation_metadata(integer) void + | function citus_internal.delete_partition_metadata(regclass) void + | function citus_internal.delete_placement_metadata(bigint) 
void + | function citus_internal.delete_shard_metadata(bigint) void + | function citus_internal.delete_tenant_schema(oid) void + | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void + | function citus_internal.global_blocked_processes() SETOF record + | function citus_internal.is_replication_origin_tracking_active() boolean + | function citus_internal.local_blocked_processes() SETOF record + | function citus_internal.mark_node_not_synced(integer,integer) void + | function citus_internal.mark_object_distributed(oid,text,oid,text) void + | function citus_internal.start_management_transaction(xid8) void + | function citus_internal.start_replication_origin_tracking() void + | function citus_internal.stop_replication_origin_tracking() void + | function citus_internal.unregister_tenant_schema_globally(oid,text) void + | function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) void + | function citus_internal.update_placement_metadata(bigint,integer,integer) void + | function citus_internal.update_relation_colocation(oid,integer) void + | function citus_unmark_object_distributed(oid,oid,integer,boolean) void +(30 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index e243c6257..975a49351 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -696,13 +696,13 @@ NOTICE: issuing SELECT citus_internal.add_partition_metadata ('fix_idx_names.p2 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT citus_internal.add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; +NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; +NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, 
shardlength, groupid, placementid) FROM placement_data; +NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; +NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx diff --git a/src/test/regress/expected/multi_limit_clause.out b/src/test/regress/expected/multi_limit_clause.out index 65304b777..83cd05837 100644 --- a/src/test/regress/expected/multi_limit_clause.out +++ b/src/test/regress/expected/multi_limit_clause.out @@ -521,6 +521,86 @@ SELECT 1 | 1 (1 row) +-- check if we can correctly push the limit when it is null +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey LIMIT null; +DEBUG: push down of limit count: ALL + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 + 1 + 1 + 1 + 2 +(7 rows) + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET 1 LIMIT null; +DEBUG: push down of limit count: ALL + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 + 1 + 1 + 2 +(6 rows) + +SELECT count(*) FROM lineitem LIMIT null; +DEBUG: push down of limit count: ALL + count +--------------------------------------------------------------------- + 12000 +(1 row) + +SELECT count(*) FROM lineitem OFFSET 0 LIMIT null; +DEBUG: push down of limit count: ALL + count +--------------------------------------------------------------------- + 12000 +(1 row) + +-- check if we push the right limit when both offset and limit are given +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET 1 LIMIT 3; +DEBUG: push down of limit count: 4 + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET null LIMIT 1; +DEBUG: push down of limit count: 1 + l_orderkey +--------------------------------------------------------------------- + 1 +(1 row) + +-- check if we can correctly push the limit when it is all +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 2 LIMIT all; +DEBUG: push down of limit count: ALL + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 + 1 + 1 + 1 +(6 rows) + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 2 OFFSET 2 LIMIT all; +DEBUG: push down of limit count: ALL + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 + 1 +(4 rows) + SET client_min_messages TO NOTICE; -- non constants should not push down CREATE OR REPLACE FUNCTION my_limit() diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index b224153bb..d15e7516c 100644 
--- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -192,10 +192,10 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS 
(VALUES (1310008, 0, 2, 100008)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (61 rows) -- Drop single shard table @@ -255,8 +255,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', 
'1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (52 rows) -- Show that schema changes are included in the activate node snapshot @@ -317,8 +317,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", 
'-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that append distributed tables are not included in the activate node snapshot @@ -385,8 +385,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 
100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that range distributed tables are not included in the activate node snapshot @@ -446,8 +446,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH 
distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Test start_metadata_sync_to_node and citus_activate_node UDFs @@ -2050,18 +2050,18 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, 
distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), 
('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH 
placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, 
shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (118 rows) -- shouldn't work since test_table is MX diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out index 6745de17a..bc1775ada 100644 --- a/src/test/regress/expected/multi_metadata_sync_0.out +++ b/src/test/regress/expected/multi_metadata_sync_0.out @@ -192,10 +192,10 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES 
('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (61 rows) -- Drop single shard table @@ -255,8 +255,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], 
ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (52 rows) -- Show that schema changes are included in the activate node snapshot @@ -317,8 +317,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT 
citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that append distributed tables are not included in the activate node snapshot @@ -385,8 +385,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 
't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that range distributed tables are not included in the activate node snapshot @@ -446,8 +446,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), 
(1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Test start_metadata_sync_to_node and citus_activate_node UDFs @@ -2050,18 +2050,18 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 
100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, 
shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 
't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (118 rows) -- shouldn't work since test_table is MX diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out index 116269a4e..762c6a30b 100644 --- a/src/test/regress/expected/multi_mx_hide_shard_names.out +++ b/src/test/regress/expected/multi_mx_hide_shard_names.out @@ -83,6 +83,52 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name test_table (1 row) +-- Even when using subquery and having no existing quals on pg_clcass +SELECT relname FROM (SELECT relname, relnamespace FROM pg_catalog.pg_class) AS q WHERE relnamespace = 'mx_hide_shard_names'::regnamespace 
ORDER BY relname; + relname +--------------------------------------------------------------------- + test_table +(1 row) + +-- Check that inserts into pg_class don't add the filter +EXPLAIN (COSTS OFF) INSERT INTO pg_class VALUES (1); + QUERY PLAN +--------------------------------------------------------------------- + Insert on pg_class + -> Result +(2 rows) + +-- Unless it's an INSERT SELECT that queries from pg_class; +EXPLAIN (COSTS OFF) INSERT INTO pg_class SELECT * FROM pg_class; + QUERY PLAN +--------------------------------------------------------------------- + Insert on pg_class + -> Seq Scan on pg_class pg_class_1 + Filter: (relation_is_a_known_shard(oid) IS NOT TRUE) +(3 rows) + +-- Check that query that psql "\d test_table" does gets optimized to an index +-- scan +EXPLAIN (COSTS OFF) SELECT c.oid, + n.nspname, + c.relname +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relname OPERATOR(pg_catalog.~) '^(test_table)$' COLLATE pg_catalog.default + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 2, 3; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: n.nspname, c.relname + -> Nested Loop Left Join + Join Filter: (n.oid = c.relnamespace) + -> Index Scan using pg_class_relname_nsp_index on pg_class c + Index Cond: (relname = 'test_table'::text) + Filter: ((relname ~ '^(test_table)$'::text) AND (relation_is_a_known_shard(oid) IS NOT TRUE) AND pg_table_is_visible(oid)) + -> Seq Scan on pg_namespace n +(8 rows) + commit prepared 'take-aggressive-lock'; -- now create an index \c - - - :master_port diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index 70a541d2a..0f31f2354 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -625,4 +625,17 @@ BEGIN ) q2 JOIN pg_dist_node USING (nodeid); END; -$func$ LANGUAGE plpgsql; \ No newline at end of file +$func$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION check_database_privileges(role_name text, db_name text, permissions text[]) +RETURNS TABLE(permission text, result text) +AS $func$ +DECLARE + permission text; +BEGIN + FOREACH permission IN ARRAY permissions + LOOP + RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$, + quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission)); + END LOOP; +END; +$func$ LANGUAGE plpgsql; diff --git a/src/test/regress/expected/node_conninfo_reload.out b/src/test/regress/expected/node_conninfo_reload.out index 785e3e1b1..3b33c54b2 100644 --- a/src/test/regress/expected/node_conninfo_reload.out +++ b/src/test/regress/expected/node_conninfo_reload.out @@ -520,5 +520,61 @@ show citus.node_conninfo; -- Should work again ALTER TABLE test ADD COLUMN e INT; +-- show that we allow providing "host" param via citus.node_conninfo +ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require host=nosuchhost'; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +-- fails due to invalid host +SELECT COUNT(*)>=0 FROM test; +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: could not parse network address "localhost": Name or 
service not known +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: could not parse network address "localhost": Name or service not known +SELECT array_agg(nodeid) as updated_nodeids from pg_dist_node WHERE nodename = 'localhost' \gset +UPDATE pg_dist_node SET nodename = '127.0.0.1' WHERE nodeid = ANY(:'updated_nodeids'::int[]); +ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require host=localhost'; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +-- works when hostaddr is specified in pg_dist_node after providing host in citus.node_conninfo +SELECT COUNT(*)>=0 FROM test; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +-- restore original nodenames into pg_dist_node +UPDATE pg_dist_node SET nodename = 'localhost' WHERE nodeid = ANY(:'updated_nodeids'::int[]); +-- reset it +ALTER SYSTEM RESET citus.node_conninfo; +select pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +select pg_sleep(0.1); -- wait for config reload to apply + pg_sleep +--------------------------------------------------------------------- + +(1 row) + DROP SCHEMA node_conninfo_reload CASCADE; NOTICE: drop cascades to table test diff --git a/src/test/regress/expected/other_databases.out b/src/test/regress/expected/other_databases.out index a15c4bb50..c67746055 100644 --- a/src/test/regress/expected/other_databases.out +++ b/src/test/regress/expected/other_databases.out @@ -98,11 +98,11 @@ REVOKE ALL ON SCHEMA citus_internal FROM nonsuperuser; DROP USER other_db_user9, nonsuperuser; -- test from a worker \c - - - :worker_1_port -CREATE DATABASE other_db2; +CREATE DATABASE worker_other_db; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to other nodes HINT: You can manually create a database and its extensions on other nodes. 
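For reference, the node_conninfo hunk above relies on the same three-step recipe that later hunks in this patch reuse whenever a Citus GUC is changed at runtime: ALTER SYSTEM to persist the setting, pg_reload_conf() to ask the server to re-read it, and a short pg_sleep() because the reload is asynchronous. A minimal standalone sketch of that sequence (the value shown is illustrative, not the one the test sets):

ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require';  -- persist the new value in postgresql.auto.conf
SELECT pg_reload_conf();                                   -- signal the server to re-read its configuration
SELECT pg_sleep(0.1);                                      -- the reload is asynchronous, so give it a moment to land
SHOW citus.node_conninfo;                                  -- confirm the active setting

In the multi-node tests the first two steps are simply wrapped in run_command_on_all_nodes() so the change takes effect on the coordinator and the workers alike.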
-\c other_db2 +\c worker_other_db CREATE USER worker_user1; BEGIN; CREATE USER worker_user2; @@ -129,8 +129,211 @@ SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; -- some user creation commands will fail but let's make sure we try to drop them just in case DROP USER IF EXISTS worker_user1, worker_user2, worker_user3; NOTICE: role "worker_user3" does not exist, skipping -\c - - - :worker_1_port -DROP DATABASE other_db2; +-- test creating and dropping a database from a Citus non-main database +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db1 +CREATE DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c other_db1 +DROP DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db - - :worker_1_port +CREATE DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER 
BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db +DROP DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE worker_other_db; +CREATE DATABASE other_db5; +-- disable create database propagation for the next test +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :worker_2_port +-- locally create a database +CREATE DATABASE local_db; +\c regression - - - +-- re-enable create database propagation +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep 
+--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :master_port +-- Test a scenario where create database fails because the database +-- already exists on another node and we don't crash etc. +CREATE DATABASE local_db; +ERROR: database "local_db" already exists +CONTEXT: while executing command on localhost:xxxxx +while executing command on localhost:xxxxx +\c regression - - - +SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "local_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :worker_2_port +-- locally drop the database for cleanup purposes +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +DROP DATABASE local_db; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + \c - - - :master_port +DROP DATABASE other_db5; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + DROP SCHEMA other_databases; DROP DATABASE other_db1; diff --git a/src/test/regress/expected/pg16.out b/src/test/regress/expected/pg16.out index a035fcfc4..546c0a832 100644 --- a/src/test/regress/expected/pg16.out +++ b/src/test/regress/expected/pg16.out @@ -81,29 +81,9 @@ SELECT create_distributed_table('tenk1', 'unique1'); (1 row) SET 
citus.log_remote_commands TO on; -EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = 1000; -NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SAVEPOINT citus_explain_savepoint -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing EXPLAIN (ANALYZE FALSE, VERBOSE FALSE, COSTS TRUE, BUFFERS FALSE, WAL FALSE, GENERIC_PLAN TRUE, TIMING FALSE, SUMMARY FALSE, FORMAT TEXT) SELECT unique1 FROM pg16.tenk1_950001 tenk1 WHERE (thousand OPERATOR(pg_catalog.=) 1000) -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing ROLLBACK TO SAVEPOINT citus_explain_savepoint -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing COMMIT -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0) - Task Count: 1 - Tasks Shown: All - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on tenk1_950001 tenk1 (cost=0.00..35.50 rows=10 width=4) - Filter: (thousand = 1000) -(7 rows) - -EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = 1000; +EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = $1; +ERROR: EXPLAIN GENERIC_PLAN is currently not supported for Citus tables +EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = $1; ERROR: EXPLAIN options ANALYZE and GENERIC_PLAN cannot be used together SET citus.log_remote_commands TO off; -- Proper error when creating statistics without a name on a Citus table diff --git a/src/test/regress/expected/remove_coordinator.out b/src/test/regress/expected/remove_coordinator.out index 0226a7cd0..e2fd5df02 100644 --- a/src/test/regress/expected/remove_coordinator.out +++ b/src/test/regress/expected/remove_coordinator.out @@ -5,6 +5,37 @@ SELECT master_remove_node('localhost', :master_port); (1 row) +-- to silence -potentially flaky- "could not establish connection after" warnings in below test +SET client_min_messages TO ERROR; +-- to fail fast when the hostname is not resolvable, as it will be the case below +SET citus.node_connection_timeout to '1s'; +BEGIN; + SET application_name TO 'new_app_name'; + -- that should fail because of bad hostname & port + SELECT citus_add_node('200.200.200.200', 1, 200); +ERROR: connection to the remote node postgres@200.200.200.200:1 failed + -- Since above command failed, now Postgres will need to revert the + -- application_name change made in this transaction and this will + -- happen within abort-transaction callback, so we won't be in a + -- transaction block while Postgres does that. + -- + -- And when the application_name changes, Citus tries to re-assign + -- the global pid but it does so only for Citus internal backends, + -- and doing so for Citus internal backends doesn't require being + -- in a transaction block and is safe. + -- + -- However, for the client external backends (like us here), Citus + -- doesn't re-assign the global pid because it's not needed and it's + -- not safe to do so outside of a transaction block. 
This is because, + -- it would require performing a catalog access to retrive the local + -- node id when the cached local node is invalidated like what just + -- happened here because of the failed citus_add_node() call made + -- above. + -- + -- So by failing here (rather than crashing), we ensure this behavior. +ROLLBACK; +RESET client_min_messages; +RESET citus.node_connection_timeout; -- restore coordinator for the rest of the tests SELECT citus_set_coordinator_host('localhost', :master_port); citus_set_coordinator_host diff --git a/src/test/regress/expected/role_operations_from_non_maindb.out b/src/test/regress/expected/role_operations_from_non_maindb.out new file mode 100644 index 000000000..3b51c89b0 --- /dev/null +++ b/src/test/regress/expected/role_operations_from_non_maindb.out @@ -0,0 +1,138 @@ +-- Create a new database +set citus.enable_create_database_propagation to on; +CREATE DATABASE role_operations_test_db; +SET citus.superuser TO 'postgres'; +-- Connect to the new database +\c role_operations_test_db +-- Test CREATE ROLE with various options +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; +\c role_operations_test_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"}] +(3 rows) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_dist_object d + JOIN pg_roles r ON d.objid = r.oid + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape') + order by r.rolname + ) t +$$); + result 
+--------------------------------------------------------------------- + [{"rolname":"test_role1"},{"rolname":"test_role2-needs\\!escape"}] + [{"rolname":"test_role1"},{"rolname":"test_role2-needs\\!escape"}] + [{"rolname":"test_role1"},{"rolname":"test_role2-needs\\!escape"}] +(3 rows) + +\c role_operations_test_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; +\c role_operations_test_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"}] +(3 rows) + +\c role_operations_test_db - - :master_port +-- Test DROP ROLE +DROP ROLE no_such_role; -- fails nicely +ERROR: role "no_such_role" does not exist +DROP ROLE IF EXISTS no_such_role; -- doesn't fail +NOTICE: role "no_such_role" does not exist, skipping +CREATE ROLE new_role; +DROP ROLE IF EXISTS no_such_role, new_role; -- doesn't fail +NOTICE: role "no_such_role" does not exist, skipping +DROP ROLE IF EXISTS test_role1, "test_role2-needs\!escape"; +\c regression - - :master_port +--verify that roles and dist_object are dropped +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + ORDER BY rolname + ) t +$$); + result 
+--------------------------------------------------------------------- + + + +(3 rows) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_roles r + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + order by r.rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$ + SELECT count(*) leaked_pg_dist_object_records_for_roles + FROM pg_dist_object LEFT JOIN pg_authid ON (objid = oid) + WHERE classid = 1260 AND oid IS NULL +$$); + result +--------------------------------------------------------------------- + 0 + 0 + 0 +(3 rows) + +-- Clean up: drop the database +set citus.enable_create_database_propagation to on; +DROP DATABASE role_operations_test_db; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/schema_based_sharding.out b/src/test/regress/expected/schema_based_sharding.out index 28cb45688..711c39141 100644 --- a/src/test/regress/expected/schema_based_sharding.out +++ b/src/test/regress/expected/schema_based_sharding.out @@ -13,19 +13,19 @@ SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); SET client_min_messages TO NOTICE; -- Verify that the UDFs used to sync tenant schema metadata to workers -- fail on NULL input. -SELECT citus_internal_add_tenant_schema(NULL, 1); +SELECT citus_internal.add_tenant_schema(NULL, 1); ERROR: schema_id cannot be NULL -SELECT citus_internal_add_tenant_schema(1, NULL); +SELECT citus_internal.add_tenant_schema(1, NULL); ERROR: colocation_id cannot be NULL -SELECT citus_internal_delete_tenant_schema(NULL); +SELECT citus_internal.delete_tenant_schema(NULL); ERROR: schema_id cannot be NULL -SELECT citus_internal_unregister_tenant_schema_globally(1, NULL); +SELECT citus_internal.unregister_tenant_schema_globally(1, NULL); ERROR: schema_name cannot be NULL -SELECT citus_internal_unregister_tenant_schema_globally(NULL, 'text'); +SELECT citus_internal.unregister_tenant_schema_globally(NULL, 'text'); ERROR: schema_id cannot be NULL --- Verify that citus_internal_unregister_tenant_schema_globally can only +-- Verify that citus_internal.unregister_tenant_schema_globally can only -- be called on schemas that are dropped already. -SELECT citus_internal_unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); +SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); ERROR: schema is expected to be already dropped because this function is only expected to be called from Citus drop hook SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); ?column? @@ -1511,10 +1511,10 @@ SELECT pg_reload_conf(); t (1 row) --- Verify that citus_internal_unregister_tenant_schema_globally is a no-op +-- Verify that citus_internal.unregister_tenant_schema_globally is a no-op -- on workers. 
-SELECT citus_internal_unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); - citus_internal_unregister_tenant_schema_globally +SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); + unregister_tenant_schema_globally --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/seclabel.out b/src/test/regress/expected/seclabel.out index f826de44b..ca6c6f984 100644 --- a/src/test/regress/expected/seclabel.out +++ b/src/test/regress/expected/seclabel.out @@ -115,16 +115,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; NOTICE: issuing SECURITY LABEL ON ROLE user1 IS 'citus_unclassified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; -NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx \c - - - :worker_1_port --- command not allowed from worker node -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified'; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -\c - - - :master_port -RESET citus.log_remote_commands; +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +-- command from the worker node should be propagated to the coordinator SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; node_type | result --------------------------------------------------------------------- @@ -132,6 +129,33 @@ SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORD worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} (2 rows) +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +RESET citus.log_remote_commands; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +\c - - - :master_port +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + 
node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; node_type | result --------------------------------------------------------------------- @@ -143,9 +167,9 @@ SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') SET citus.log_remote_commands TO on; SET citus.grep_remote_commands = '%SECURITY LABEL%'; SELECT 1 FROM citus_add_node('localhost', :worker_2_port); -NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified' +NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT worker_create_or_alter_role('user 2', 'CREATE ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' +NOTICE: issuing SELECT worker_create_or_alter_role('user 2', 'CREATE ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL', 'ALTER ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx ?column? 
--------------------------------------------------------------------- @@ -155,9 +179,9 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; node_type | result --------------------------------------------------------------------- - coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} - worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} - worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} (3 rows) SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; @@ -168,6 +192,35 @@ SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} (3 rows) +-- disable the GUC and check that the command is not propagated +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +NOTICE: not propagating SECURITY LABEL commands to other nodes +HINT: Connect to other nodes directly to manually assign necessary labels. +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c - - - :worker_2_port +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus ''!unclassified'; +NOTICE: not propagating SECURITY LABEL commands to other nodes +HINT: Connect to other nodes directly to manually assign necessary labels. 
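With citus.enable_alter_role_propagation off, the label above only changes on the node where the SECURITY LABEL command ran, which is what the get_citus_tests_label_provider_labels() checks that follow demonstrate node by node. The same per-node state can also be inspected directly from the standard pg_seclabels catalog view; a small sketch (a plain catalog query, not part of the regression output):

SELECT provider, objtype, label, objname
FROM pg_seclabels
WHERE objtype = 'role'   -- per this patch, only role labels are propagated by Citus
ORDER BY objname;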
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +RESET citus.enable_alter_role_propagation; -- cleanup RESET citus.log_remote_commands; DROP ROLE user1, "user 2"; diff --git a/src/test/regress/expected/seclabel_non_maindb.out b/src/test/regress/expected/seclabel_non_maindb.out new file mode 100644 index 000000000..48c89fb31 --- /dev/null +++ b/src/test/regress/expected/seclabel_non_maindb.out @@ -0,0 +1,111 @@ +-- SECLABEL +-- +-- Test suite for running SECURITY LABEL ON ROLE statements from non-main databases +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database1; +CREATE DATABASE database2; +\c - - - :worker_1_port +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database_w1; +\c - - - :master_port +CREATE ROLE user1; +\c database1 +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +SHOW citus.superuser; + citus.superuser +--------------------------------------------------------------------- + postgres +(1 row) + +CREATE ROLE "user 2"; +-- Set a SECURITY LABEL on a role from a non-main database +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_unclassified'; +-- Check the result +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c database1 +-- Set a SECURITY LABEL on database, it should not be propagated +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database1 IS 'citus_classified'; +-- Set a SECURITY LABEL on a table, it should not be propagated +CREATE TABLE a (i int); +SECURITY LABEL ON TABLE a IS 'citus_classified'; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "database", "provider": "citus '!tests_label_provider"} + worker_1 | + worker_2 
| +(3 rows) + +-- Check that only the SECURITY LABEL for ROLES is propagated to the non-main databases on other nodes +\c database_w1 - - :worker_1_port +SELECT provider, objtype, label, objname FROM pg_seclabels ORDER BY objname; + provider | objtype | label | objname +--------------------------------------------------------------------- + citus '!tests_label_provider | role | citus_unclassified | "user 2" + citus '!tests_label_provider | role | citus_classified | user1 +(2 rows) + +-- Check the result after a transaction +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database_w1 IS 'citus_classified'; +COMMIT; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database_w1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | + worker_1 | {"label": "citus_classified", "objtype": "database", "provider": "citus '!tests_label_provider"} + worker_2 | +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +ROLLBACK; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +-- clean up +SET citus.enable_create_database_propagation to ON; +DROP DATABASE database1; +DROP DATABASE database2; +DROP DATABASE database_w1; +DROP ROLE user1; +DROP ROLE "user 2"; +RESET citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index a7cd6b38c..988fa68be 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -2395,6 +2395,74 @@ SELECT count(*) FROM pg_dist_partition; 0 (1 row) +-- verify a system with a new node won't copy distributed table shards without reference tables +SELECT 1 from master_remove_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT public.wait_until_metadata_sync(30000); + wait_until_metadata_sync +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE r1 (a int PRIMARY KEY, b int); +SELECT create_reference_table('r1'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE d1 (a int PRIMARY KEY, b int); +SELECT create_distributed_table('d1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 15; +SELECT 1 from master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- count the number of placements for the reference table to verify it is not available on +-- all nodes +SELECT count(*) +FROM pg_dist_shard +JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'r1'::regclass; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- #7426 We can't move shards to the fresh node before we copy reference tables there. +-- rebalance_table_shards() will do the copy, but the low-level +-- citus_move_shard_placement() should raise an error +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; +ERROR: there are missing reference tables on some nodes +SELECT replicate_reference_tables(); + replicate_reference_tables +--------------------------------------------------------------------- + +(1 row) + +-- After replication, the move should succeed. +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE d1, r1; -- verify a system having only reference tables will copy the reference tables when -- executing the rebalancer SELECT 1 from master_remove_node('localhost', :worker_2_port); diff --git a/src/test/regress/expected/system_queries.out b/src/test/regress/expected/system_queries.out new file mode 100644 index 000000000..cd2aef4d2 --- /dev/null +++ b/src/test/regress/expected/system_queries.out @@ -0,0 +1,33 @@ +-- The following query retrieves the foreign key constraints of the table "pg_dist_background_job" +-- along with their details. This modification includes a fix for a null pointer exception that occurred +-- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604. 
+select + ct.conname as constraint_name, + a.attname as column_name, + fc.relname as foreign_table_name, + fns.nspname as foreign_table_schema +from + (SELECT ct.conname, ct.conrelid, ct.confrelid, ct.conkey, ct.contype, +ct.confkey, generate_subscripts(ct.conkey, 1) AS s + FROM pg_constraint ct + ) AS ct + inner join pg_class c on c.oid=ct.conrelid + inner join pg_namespace ns on c.relnamespace=ns.oid + inner join pg_attribute a on a.attrelid=ct.conrelid and a.attnum = +ct.conkey[ct.s] + left join pg_class fc on fc.oid=ct.confrelid + left join pg_namespace fns on fc.relnamespace=fns.oid + left join pg_attribute fa on fa.attrelid=ct.confrelid and fa.attnum = +ct.confkey[ct.s] +where + ct.contype='f' + and fc.relname='pg_dist_background_job' + and ns.nspname='pg_catalog' +order by + fns.nspname, fc.relname, a.attnum; + constraint_name | column_name | foreign_table_name | foreign_table_schema +--------------------------------------------------------------------- + pg_dist_background_task_job_id_fkey | job_id | pg_dist_background_job | pg_catalog + pg_dist_background_task_depend_job_id_fkey | job_id | pg_dist_background_job | pg_catalog +(2 rows) + diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 0401c8f01..ca31b222b 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -60,9 +60,23 @@ ORDER BY 1; function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") + function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) + function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) + function citus_internal.add_tenant_schema(oid,integer) + function citus_internal.adjust_local_clock_to_remote(cluster_clock) function citus_internal.commit_management_command_2pc() + function citus_internal.database_command(text) + function citus_internal.delete_colocation_metadata(integer) + function citus_internal.delete_partition_metadata(regclass) + function citus_internal.delete_placement_metadata(bigint) + function citus_internal.delete_shard_metadata(bigint) + function citus_internal.delete_tenant_schema(oid) function citus_internal.execute_command_on_remote_nodes_as_user(text,text) function citus_internal.find_groupid_for_node(text,integer) + function citus_internal.global_blocked_processes() + function citus_internal.is_replication_origin_tracking_active() + function citus_internal.local_blocked_processes() + function citus_internal.mark_node_not_synced(integer,integer) function citus_internal.mark_object_distributed(oid,text,oid,text) function citus_internal.pg_dist_node_trigger_func() function citus_internal.pg_dist_rebalance_strategy_trigger_func() @@ -71,6 +85,12 @@ ORDER BY 1; function citus_internal.replace_isolation_tester_func() function citus_internal.restore_isolation_tester_func() function citus_internal.start_management_transaction(xid8) + function citus_internal.start_replication_origin_tracking() + function citus_internal.stop_replication_origin_tracking() + function citus_internal.unregister_tenant_schema_globally(oid,text) + function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) + function citus_internal.update_placement_metadata(bigint,integer,integer) + function 
citus_internal.update_relation_colocation(oid,integer) function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid) function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") @@ -79,7 +99,6 @@ ORDER BY 1; function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text) function citus_internal_add_tenant_schema(oid,integer) function citus_internal_adjust_local_clock_to_remote(cluster_clock) - function citus_internal_database_command(text) function citus_internal_delete_colocation_metadata(integer) function citus_internal_delete_partition_metadata(regclass) function citus_internal_delete_placement_metadata(bigint) @@ -155,7 +174,7 @@ ORDER BY 1; function citus_text_send_as_jsonb(text) function citus_total_relation_size(regclass,boolean) function citus_truncate_trigger() - function citus_unmark_object_distributed(oid,oid,integer) + function citus_unmark_object_distributed(oid,oid,integer,boolean) function citus_update_node(integer,text,integer,boolean,integer) function citus_update_shard_statistics(bigint) function citus_update_table_statistics(regclass) @@ -352,5 +371,5 @@ ORDER BY 1; view citus_stat_tenants_local view pg_dist_shard_placement view time_partitions -(342 rows) +(361 rows) diff --git a/src/test/regress/expected/upgrade_post_11_after.out b/src/test/regress/expected/upgrade_post_11_after.out index 422bc846f..49bd20432 100644 --- a/src/test/regress/expected/upgrade_post_11_after.out +++ b/src/test/regress/expected/upgrade_post_11_after.out @@ -67,6 +67,20 @@ SELECT 1 FROM run_command_on_workers($$SELECT pg_reload_conf()$$); 1 (2 rows) +-- In the version that we use for upgrade tests (v10.2.0), we propagate +-- "valid until" to the workers as "infinity" even if it's not set. And +-- given that "postgres" role is created in the older version, "valid until" +-- is set to "infinity" on the workers while this is not the case for +-- coordinator. See https://github.com/citusdata/citus/issues/7533. +-- +-- We're fixing this for new versions of Citus and we'll probably backport +-- this to some older versions too. However, v10.2.0 won't ever have this +-- fix. +-- +-- For this reason, here we set "valid until" to "infinity" for all the +-- nodes so that below query doesn't report any difference between the +-- metadata on coordinator and workers. 
+ALTER ROLE postgres WITH VALID UNTIL 'infinity'; -- make sure that the metadata is consistent across all nodes -- we exclude the distributed_object_data as they are -- not sorted in the same order (as OIDs differ on the nodes) diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_after.out b/src/test/regress/expected/upgrade_rebalance_strategy_after.out index 4036af539..c7ea5cc4e 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_after.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_after.out @@ -1,8 +1,9 @@ SELECT * FROM pg_catalog.pg_dist_rebalance_strategy ORDER BY name; - name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold | improvement_threshold + name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold | improvement_threshold --------------------------------------------------------------------- - by_disk_size | f | citus_shard_cost_by_disk_size | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0.1 | 0.01 | 0.5 - by_shard_count | f | citus_shard_cost_1 | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0 | 0 | 0 - custom_strategy | t | upgrade_rebalance_strategy.shard_cost_2 | upgrade_rebalance_strategy.capacity_high_worker_1 | upgrade_rebalance_strategy.only_worker_2 | 0.5 | 0.2 | 0.3 -(3 rows) + by_disk_size | f | citus_shard_cost_by_disk_size | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0.1 | 0.01 | 0.5 + by_shard_count | f | citus_shard_cost_1 | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0 | 0 | 0 + custom_strategy | t | upgrade_rebalance_strategy.shard_cost_2 | upgrade_rebalance_strategy.capacity_high_worker_1 | upgrade_rebalance_strategy.only_worker_2 | 0.5 | 0.2 | 0.3 + invalid_strategy | f | 1234567 | upgrade_rebalance_strategy.capacity_high_worker_1 | upgrade_rebalance_strategy.only_worker_2 | 0.5 | 0.2 | 0.3 +(4 rows) diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_before.out b/src/test/regress/expected/upgrade_rebalance_strategy_before.out index cf1d122b3..85b458389 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_before.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_before.out @@ -35,3 +35,23 @@ SELECT citus_set_default_rebalance_strategy('custom_strategy'); (1 row) +-- Disable the trigger temporarily to allow the invalid strategy to be added. +-- Normally an invalid strategy can end up in the table by deleting one of the +-- functions it depends on. But we do directly in this test because we want to +-- have a consistent OID, so we get consistent test output. 
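As an illustrative aside (not part of the patch), in the scenario this comment describes an "invalid" strategy is a row whose function reference no longer resolves to a live function; a catalog probe along these lines would surface such rows, including the invalid_strategy row created below:

SELECT name, shard_cost_function
FROM pg_catalog.pg_dist_rebalance_strategy s
WHERE NOT EXISTS (
    SELECT 1 FROM pg_proc p WHERE p.oid = s.shard_cost_function::oid
);

With the validation trigger disabled below, citus_add_rebalance_strategy() accepts the bogus OID 1234567 as the shard cost function, and the resulting row shows up in a probe like this one.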
+ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; +SELECT citus_add_rebalance_strategy( + 'invalid_strategy', + 1234567, + 'capacity_high_worker_1', + 'only_worker_2', + 0.5, + 0.2, + 0.3 + ); + citus_add_rebalance_strategy +--------------------------------------------------------------------- + +(1 row) + +ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; diff --git a/src/test/regress/failure_schedule b/src/test/regress/failure_schedule index e1ad362b5..8b992422e 100644 --- a/src/test/regress/failure_schedule +++ b/src/test/regress/failure_schedule @@ -35,6 +35,7 @@ test: failure_mx_metadata_sync test: failure_mx_metadata_sync_multi_trans test: failure_connection_establishment test: failure_non_main_db_2pc +test: failure_create_database # this test syncs metadata to the workers test: failure_failover_to_local_execution diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index cfff00942..015f74973 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -40,6 +40,7 @@ test: create_drop_database_propagation_pg15 test: create_drop_database_propagation_pg16 test: comment_on_database test: comment_on_role +test: metadata_sync_from_non_maindb # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential test: single_shard_table_udfs test: schema_based_sharding @@ -57,7 +58,7 @@ test: multi_metadata_attributes test: multi_read_from_secondaries -test: grant_on_database_propagation +test: grant_on_database_propagation grant_on_database_propagation_from_non_maindb test: alter_database_propagation test: citus_shards diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index f599363a9..bbb4047a9 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -79,7 +79,7 @@ test: multi_basic_queries cross_join multi_complex_expressions multi_subquery mu test: multi_subquery_complex_reference_clause multi_subquery_window_functions multi_view multi_sql_function multi_prepare_sql test: sql_procedure multi_function_in_join row_types materialized_view test: multi_subquery_in_where_reference_clause adaptive_executor propagate_set_commands geqo -test: forcedelegation_functions +test: forcedelegation_functions system_queries # this should be run alone as it gets too many clients test: join_pushdown test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message @@ -103,21 +103,24 @@ test: multi_dropped_column_aliases foreign_key_restriction_enforcement test: binary_protocol test: alter_table_set_access_method test: alter_distributed_table -test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 +test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 issue_7477 issue_7705 test: object_propagation_debug test: undistribute_table test: run_command_on_all_nodes test: background_task_queue_monitor -test: other_databases +test: other_databases grant_role_from_non_maindb role_operations_from_non_maindb seclabel_non_maindb test: citus_internal_access +test: function_with_case_when # Causal clock test test: clock # MERGE tests -test: merge pgmerge merge_repartition2 +test: merge pgmerge +test: merge_repartition2 test: merge_repartition1 merge_schema_sharding test: merge_partition_tables +test: merge_vcore # --------- # test that no tests leaked intermediate results. 
This should always be last diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index c9a85d523..35671ad26 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -296,10 +296,12 @@ sub generate_hba open(my $fh, ">", catfile($TMP_CHECKDIR, $nodename, "data", "pg_hba.conf")) or die "could not open pg_hba.conf"; - print $fh "host all alice,bob localhost md5\n"; + print $fh "host all alice,bob 127.0.0.1/32 md5\n"; + print $fh "host all alice,bob ::1/128 md5\n"; print $fh "host all all 127.0.0.1/32 trust\n"; print $fh "host all all ::1/128 trust\n"; - print $fh "host replication postgres localhost trust\n"; + print $fh "host replication postgres 127.0.0.1/32 trust\n"; + print $fh "host replication postgres ::1/128 trust\n"; close $fh; } @@ -1126,16 +1128,33 @@ sub RunVanillaTests system("mkdir", ("-p", "$pgregressOutputdir/sql")) == 0 or die "Could not create vanilla sql dir."; - $exitcode = system("$plainRegress", - ("--dlpath", $dlpath), - ("--inputdir", $pgregressInputdir), - ("--outputdir", $pgregressOutputdir), - ("--schedule", catfile("$pgregressInputdir", "parallel_schedule")), - ("--use-existing"), - ("--host","$host"), - ("--port","$masterPort"), - ("--user","$user"), - ("--dbname", "$dbName")); + if ($majorversion >= "16") + { + $exitcode = system("$plainRegress", + ("--dlpath", $dlpath), + ("--inputdir", $pgregressInputdir), + ("--outputdir", $pgregressOutputdir), + ("--expecteddir", $pgregressOutputdir), + ("--schedule", catfile("$pgregressInputdir", "parallel_schedule")), + ("--use-existing"), + ("--host","$host"), + ("--port","$masterPort"), + ("--user","$user"), + ("--dbname", "$dbName")); + } + else + { + $exitcode = system("$plainRegress", + ("--dlpath", $dlpath), + ("--inputdir", $pgregressInputdir), + ("--outputdir", $pgregressOutputdir), + ("--schedule", catfile("$pgregressInputdir", "parallel_schedule")), + ("--use-existing"), + ("--host","$host"), + ("--port","$masterPort"), + ("--user","$user"), + ("--dbname", "$dbName")); + } } if ($useMitmproxy) { diff --git a/src/test/regress/spec/isolation_database_cmd_from_any_node.spec b/src/test/regress/spec/isolation_database_cmd_from_any_node.spec index 1e004cb33..8637a8942 100644 --- a/src/test/regress/spec/isolation_database_cmd_from_any_node.spec +++ b/src/test/regress/spec/isolation_database_cmd_from_any_node.spec @@ -2,11 +2,15 @@ setup { -- OCLASS for database changed in PG 16 from 25 to 26 SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS value INTO oclass_database; + + SELECT 1 FROM citus_add_node('localhost', 57636, 0); } teardown { DROP TABLE IF EXISTS oclass_database; + + select 1 from citus_remove_node('localhost', 57636); } session "s1" diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index fce379427..9683935be 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -90,7 +90,7 @@ step "s2-view-worker" ('%pg_prepared_xacts%'), ('%COMMIT%'), ('%dump_local_%'), - ('%citus_internal_local_blocked_processes%'), + ('%citus_internal.local_blocked_processes%'), ('%add_node%'), ('%csa_from_one_node%'), ('%pg_locks%')) diff --git a/src/test/regress/sql/alter_database_propagation.sql b/src/test/regress/sql/alter_database_propagation.sql index 4904919a6..9a8b1fab8 100644 --- 
a/src/test/regress/sql/alter_database_propagation.sql +++ b/src/test/regress/sql/alter_database_propagation.sql @@ -49,6 +49,7 @@ alter database regression set lock_timeout to DEFAULT; alter database regression RESET lock_timeout; set citus.enable_create_database_propagation=on; +SET citus.next_operation_id TO 3000; create database "regression!'2"; alter database "regression!'2" with CONNECTION LIMIT 100; alter database "regression!'2" with IS_TEMPLATE true CONNECTION LIMIT 50; @@ -90,6 +91,7 @@ set citus.enable_create_database_propagation=on; drop database regression3; +SET citus.next_operation_id TO 3100; create database "regression!'4"; diff --git a/src/test/regress/sql/alter_table_add_column.sql b/src/test/regress/sql/alter_table_add_column.sql index 255e7714f..355667842 100644 --- a/src/test/regress/sql/alter_table_add_column.sql +++ b/src/test/regress/sql/alter_table_add_column.sql @@ -41,6 +41,10 @@ ALTER TABLE referencing ADD COLUMN "test_\'!7" "simple_!\'custom_type"; ALTER TABLE referencing ADD COLUMN test_8 integer CHECK (test_8 > 0); ALTER TABLE referencing ADD COLUMN test_8 integer CONSTRAINT check_test_8 CHECK (test_8 > 0); +-- error out properly even if the REFERENCES does not include the column list of the referenced table +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced; +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced(int_col); + -- try to add test_6 again, but with IF NOT EXISTS ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 text; ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 integer; diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index 2a63194c1..de55258c3 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -5,7 +5,7 @@ -- For versions >= 16, pg16_create_drop_database_propagation.sql is used. -- Test the UDF that we use to issue database command during metadata sync. 
-SELECT pg_catalog.citus_internal_database_command(null); +SELECT citus_internal.database_command(null); CREATE ROLE test_db_commands WITH LOGIN; ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_db_commands'; @@ -14,20 +14,20 @@ SELECT pg_sleep(0.1); SET ROLE test_db_commands; -- fails on null input -SELECT pg_catalog.citus_internal_database_command(null); +SELECT citus_internal.database_command(null); -- fails on non create / drop db command -SELECT pg_catalog.citus_internal_database_command('CREATE TABLE foo_bar(a int)'); -SELECT pg_catalog.citus_internal_database_command('SELECT 1'); -SELECT pg_catalog.citus_internal_database_command('asfsfdsg'); -SELECT pg_catalog.citus_internal_database_command(''); +SELECT citus_internal.database_command('CREATE TABLE foo_bar(a int)'); +SELECT citus_internal.database_command('SELECT 1'); +SELECT citus_internal.database_command('asfsfdsg'); +SELECT citus_internal.database_command(''); RESET ROLE; ALTER ROLE test_db_commands nocreatedb; SET ROLE test_db_commands; --- make sure that pg_catalog.citus_internal_database_command doesn't cause privilege escalation -SELECT pg_catalog.citus_internal_database_command('CREATE DATABASE no_permissions'); +-- make sure that citus_internal.database_command doesn't cause privilege escalation +SELECT citus_internal.database_command('CREATE DATABASE no_permissions'); RESET ROLE; DROP USER test_db_commands; @@ -218,7 +218,8 @@ SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER B --tests for special characters in database name set citus.enable_create_database_propagation=on; SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; +set citus.grep_remote_commands = '%DATABASE%'; +SET citus.next_operation_id TO 2000; create database "mydatabase#1'2"; @@ -746,6 +747,63 @@ SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FRO DROP DATABASE no_createdb; DROP USER no_createdb; +-- Test a failure scenario by trying to create a distributed database that +-- already exists on one of the nodes. + +\c - - - :worker_1_port +CREATE DATABASE "test_\!failure"; + +\c - - - :master_port + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE "test_\!failure"; + +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; + +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure$$) ORDER BY node_type, result; + +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE "test_\!failure1"; + +\c - - - :worker_1_port +DROP DATABASE "test_\!failure"; + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE "test_\!failure1"; + +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; + +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + +\c - - - :master_port + +-- Before dropping local "test_\!failure1" database, test a failure scenario +-- by trying to create a distributed database that already exists "on local +-- node" this time. 
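As an illustrative aside (not part of the patch), these failure scenarios ultimately assert that no citus_temp_database_% databases survive once citus_cleanup_orphaned_resources() has run; a quick way to eyeball any leftovers on every node is a probe like:

SELECT result AS leftover_temp_databases
FROM run_command_on_all_nodes($$
    SELECT COALESCE(string_agg(datname::text, ', '), '<none>')
    FROM pg_database WHERE datname LIKE 'citus_temp_database_%'
$$);

The scenario below repeats the same exercise, except that the pre-existing conflicting database now lives on the coordinator rather than on a worker.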
+ +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE "test_\!failure1"; + +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; + +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + +SET citus.enable_create_database_propagation TO OFF; + +DROP DATABASE "test_\!failure1"; + SET citus.enable_create_database_propagation TO ON; --clean up resources created by this test diff --git a/src/test/regress/sql/create_drop_database_propagation_pg15.sql b/src/test/regress/sql/create_drop_database_propagation_pg15.sql index 40d1b9e09..4e006c54f 100644 --- a/src/test/regress/sql/create_drop_database_propagation_pg15.sql +++ b/src/test/regress/sql/create_drop_database_propagation_pg15.sql @@ -60,6 +60,14 @@ CREATE DATABASE test_locale_provider SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER BY node_type; +\c test_locale_provider - - :worker_2_port + +set citus.enable_create_database_propagation to on; +create database unsupported_option_from_non_main_db with oid = 12345; + +\c regression - - :master_port + +set citus.enable_create_database_propagation to on; drop database test_locale_provider; \c - - - :master_port diff --git a/src/test/regress/sql/create_ref_dist_from_citus_local.sql b/src/test/regress/sql/create_ref_dist_from_citus_local.sql index 7c10abce6..e9610d65f 100644 --- a/src/test/regress/sql/create_ref_dist_from_citus_local.sql +++ b/src/test/regress/sql/create_ref_dist_from_citus_local.sql @@ -219,8 +219,8 @@ ROLLBACK; -- Test the UDFs that we use to convert Citus local tables to single-shard tables and -- reference tables. 
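The hunk that follows (and several later ones) switches these calls from pg_catalog-prefixed names such as citus_internal_update_none_dist_table_metadata to their counterparts under the citus_internal schema. As an illustrative aside (not part of the patch), what lives in that schema can be listed with:

SELECT proname, pg_get_function_identity_arguments(oid) AS args
FROM pg_proc
WHERE pronamespace = 'citus_internal'::regnamespace
ORDER BY 1, 2;

The upgrade_list_citus_objects expected output earlier in this diff is essentially a superset of such a listing.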
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true); -SELECT pg_catalog.citus_internal_delete_placement_metadata(1); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, true); +SELECT citus_internal.delete_placement_metadata(1); CREATE ROLE test_user_create_ref_dist WITH LOGIN; GRANT ALL ON SCHEMA create_ref_dist_from_citus_local TO test_user_create_ref_dist; @@ -234,18 +234,18 @@ SET citus.next_placement_id TO 8510000; SET citus.shard_replication_factor TO 1; SET search_path TO create_ref_dist_from_citus_local; -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null); +SELECT citus_internal.update_none_dist_table_metadata(null, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, null, 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', null, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, null); -SELECT pg_catalog.citus_internal_delete_placement_metadata(null); +SELECT citus_internal.delete_placement_metadata(null); CREATE TABLE udf_test (col_1 int); SELECT citus_add_local_table_to_metadata('udf_test'); BEGIN; - SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); + SELECT citus_internal.update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); SELECT COUNT(*)=1 FROM pg_dist_partition WHERE logicalrelid = 'create_ref_dist_from_citus_local.udf_test'::regclass AND repmodel = 'k' AND colocationid = 99999 AND autoconverted = true; @@ -253,7 +253,7 @@ BEGIN; SELECT placementid AS udf_test_placementid FROM pg_dist_shard_placement WHERE shardid = get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.udf_test') \gset - SELECT pg_catalog.citus_internal_delete_placement_metadata(:udf_test_placementid); + SELECT citus_internal.delete_placement_metadata(:udf_test_placementid); SELECT COUNT(*)=0 FROM pg_dist_placement WHERE placementid = :udf_test_placementid; ROLLBACK; diff --git a/src/test/regress/sql/create_role_propagation.sql b/src/test/regress/sql/create_role_propagation.sql index fa32cf2d2..bd2951b17 100644 --- a/src/test/regress/sql/create_role_propagation.sql +++ b/src/test/regress/sql/create_role_propagation.sql @@ -75,6 +75,8 @@ SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::t \c - - - :master_port +create role test_admin_role; + -- test grants with distributed and non-distributed roles SELECT master_remove_node('localhost', :worker_2_port); @@ -84,6 +86,8 @@ CREATE ROLE dist_role_2; CREATE ROLE dist_role_3; CREATE ROLE dist_role_4; + + SET citus.enable_create_role_propagation TO OFF; CREATE ROLE non_dist_role_1 SUPERUSER; @@ -93,28 +97,71 @@ CREATE ROLE non_dist_role_4; SET citus.enable_create_role_propagation TO ON; + +grant dist_role_3,dist_role_1 to test_admin_role with admin option; + SET ROLE dist_role_1; GRANT non_dist_role_1 TO non_dist_role_2; SET citus.enable_create_role_propagation TO OFF; +grant dist_role_1 to non_dist_role_1 with admin option; SET ROLE non_dist_role_1; -GRANT dist_role_1 TO dist_role_2; +GRANT dist_role_1 TO dist_role_2 granted by non_dist_role_1; RESET ROLE; SET 
citus.enable_create_role_propagation TO ON; -GRANT dist_role_3 TO non_dist_role_3; + +GRANT dist_role_3 TO non_dist_role_3 granted by test_admin_role; GRANT non_dist_role_4 TO dist_role_4; +GRANT dist_role_3 TO dist_role_4 granted by test_admin_role; + SELECT 1 FROM master_add_node('localhost', :worker_2_port); -SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + ) q; + $$ +); + +REVOKE dist_role_3 from dist_role_4 granted by test_admin_role cascade; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + +SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1','test_admin_role')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1; +REVOKE dist_role_3 from non_dist_role_3 granted by test_admin_role cascade; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + +revoke dist_role_3,dist_role_1 from test_admin_role cascade; +drop role test_admin_role; + \c - - - :worker_1_port SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; diff --git a/src/test/regress/sql/failure_create_database.sql b/src/test/regress/sql/failure_create_database.sql new file mode 100644 index 000000000..d117dc811 --- /dev/null +++ b/src/test/regress/sql/failure_create_database.sql @@ -0,0 +1,128 @@ +SET citus.enable_create_database_propagation TO ON; +SET client_min_messages TO WARNING; + +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); + +CREATE FUNCTION get_temp_databases_on_nodes() +RETURNS TEXT AS $func$ + SELECT array_agg(DISTINCT result ORDER BY result) AS temp_databases_on_nodes FROM run_command_on_all_nodes($$SELECT datname FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$) WHERE result != ''; +$func$ +LANGUAGE sql; + +CREATE FUNCTION count_db_cleanup_records() +RETURNS TABLE(object_name TEXT, count INTEGER) AS $func$ + SELECT object_name, COUNT(*) FROM pg_dist_cleanup WHERE object_name LIKE 'citus_temp_database_%' GROUP BY object_name; +$func$ +LANGUAGE sql; + +CREATE FUNCTION ensure_no_temp_databases_on_any_nodes() +RETURNS BOOLEAN AS $func$ + SELECT bool_and(result::boolean) AS no_temp_databases_on_any_nodes FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 
FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +$func$ +LANGUAGE sql; + +-- cleanup any orphaned resources from previous runs +CALL citus_cleanup_orphaned_resources(); + +SET citus.next_operation_id TO 4000; + +ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); + +SELECT citus.mitmproxy('conn.kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^CREATE DATABASE").cancel(' || pg_backend_pid() || ')'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^ALTER DATABASE").cancel(' || pg_backend_pid() || ')'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +-- not call citus_cleanup_orphaned_resources() but recover the prepared transactions this time +SELECT 1 FROM recover_prepared_transactions(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +DROP DATABASE db1; + +-- after recovering the prepared transactions, cleanup records should also be removed +SELECT * FROM count_db_cleanup_records(); + +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT citus_internal.acquire_citus_advisory_object_class_lock").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onParse(query="^WITH distributed_object_data").kill()'); +CREATE DATABASE db1; +SELECT 
citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +CREATE DATABASE db1; + +-- show that a successful database creation doesn't leave any pg_dist_cleanup records behind +SELECT * FROM count_db_cleanup_records(); + +DROP DATABASE db1; + +DROP FUNCTION get_temp_databases_on_nodes(); +DROP FUNCTION ensure_no_temp_databases_on_any_nodes(); +DROP FUNCTION count_db_cleanup_records(); + +SELECT 1 FROM citus_remove_node('localhost', :master_port); diff --git a/src/test/regress/sql/failure_mx_metadata_sync.sql b/src/test/regress/sql/failure_mx_metadata_sync.sql index 90e882fe5..d8f82296f 100644 --- a/src/test/regress/sql/failure_mx_metadata_sync.sql +++ b/src/test/regress/sql/failure_mx_metadata_sync.sql @@ -56,10 +56,10 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; -- Check failures on DDL command propagation CREATE TABLE t2 (id int PRIMARY KEY); -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_placement_metadata").kill()'); SELECT create_distributed_table('t2', 'id'); -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_shard_metadata").cancel(' || :pid || ')'); SELECT create_distributed_table('t2', 'id'); -- Verify that the table was not distributed diff --git a/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql b/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql index e2f6e60c3..afe6a64e9 100644 --- a/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql +++ b/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql @@ -285,15 +285,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_ SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add shard metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add placement metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add colocation metadata diff --git a/src/test/regress/sql/function_with_case_when.sql b/src/test/regress/sql/function_with_case_when.sql new file mode 100644 index 000000000..03c6678e4 --- /dev/null +++ 
b/src/test/regress/sql/function_with_case_when.sql @@ -0,0 +1,27 @@ +CREATE SCHEMA function_with_case; +SET search_path TO function_with_case; + +-- create function +CREATE OR REPLACE FUNCTION test_err(v1 text) + RETURNS text + LANGUAGE plpgsql + SECURITY DEFINER +AS $function$ + +begin + return v1 || ' - ok'; +END; +$function$; +do $$ declare + lNewValues text; + val text; +begin + val = 'test'; + lNewValues = test_err(v1 => case when val::text = 'test'::text then 'yes' else 'no' end); + raise notice 'lNewValues= %', lNewValues; +end;$$ ; + +-- call function +SELECT test_err('test'); + +DROP SCHEMA function_with_case CASCADE; diff --git a/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql new file mode 100644 index 000000000..f83472b36 --- /dev/null +++ b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql @@ -0,0 +1,246 @@ +-- Public role has connect,temp,temporary privileges on database +-- To test these scenarios, we need to revoke these privileges from public role +-- since public role privileges are inherited by new roles/users +set citus.enable_create_database_propagation to on; +create database test_2pc_db; +show citus.main_db; +revoke connect,temp,temporary on database test_2pc_db from public; + +CREATE SCHEMA grant_on_database_propagation_non_maindb; +SET search_path TO grant_on_database_propagation_non_maindb; + +-- test grant/revoke CREATE privilege propagation on database +create user "myuser'_test"; + +\c test_2pc_db - - :master_port +grant create on database test_2pc_db to "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +\c test_2pc_db - - :master_port +revoke create on database test_2pc_db from "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +drop user "myuser'_test"; +----------------------------------------------------------------------- + +-- test grant/revoke CONNECT privilege propagation on database +\c regression - - :master_port +create user myuser2; + +\c test_2pc_db - - :master_port +grant CONNECT on database test_2pc_db to myuser2; + +\c regression - - :master_port; +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +\c test_2pc_db - - :master_port +revoke connect on database test_2pc_db from myuser2; + +\c regression - - :master_port +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +drop user myuser2; + +----------------------------------------------------------------------- + +-- test grant/revoke TEMP privilege propagation on database +\c regression - - :master_port +create user myuser3; + +-- test grant/revoke temp on database +\c test_2pc_db - - :master_port +grant TEMP on database test_2pc_db to myuser3; + +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + + +\c test_2pc_db - - :worker_1_port +revoke TEMP on database test_2pc_db from myuser3; + +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + +drop user myuser3; + +----------------------------------------------------------------------- + +\c regression - - :master_port +-- test temporary privilege on database +create user myuser4; + +-- test grant/revoke temporary on database +\c test_2pc_db - - :worker_1_port +grant TEMPORARY on database test_2pc_db to 
myuser4; + +\c regression - - :master_port +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +\c test_2pc_db - - :master_port +revoke TEMPORARY on database test_2pc_db from myuser4; + +\c regression - - :master_port; +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +drop user myuser4; +----------------------------------------------------------------------- + +-- test ALL privileges with ALL statement on database +create user myuser5; + +grant ALL on database test_2pc_db to myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port +revoke ALL on database test_2pc_db from myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +drop user myuser5; +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database +create user myuser6; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6; + +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6; + +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +drop user myuser6; +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option +create user myuser7; +create user myuser_1; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7; + +set role myuser7; +--here since myuser7 does not have grant option, it should fail +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1; + +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port + +RESET ROLE; + +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option; +set role myuser7; + +--here since myuser have grant option, it should succeed +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7; + +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port + +RESET ROLE; + +--below test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict; +--below test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict ; + +--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade ; + +--here we test if myuser7 still have the privileges after revoke grant option for + +\c regression - - :master_port +select 
check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +reset role; + +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1; + +\c regression - - :master_port +drop user myuser_1; +drop user myuser7; + +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database multi database +-- and multi user +\c regression - - :master_port +create user myuser8; +create user myuser_2; + +set citus.enable_create_database_propagation to on; +create database test_db; + +revoke connect,temp,temporary on database test_db from public; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2; + +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +RESET ROLE; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ; + +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2; + +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade; + +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +reset role; + +\c regression - - :master_port +drop user myuser_2; +drop user myuser8; + +set citus.enable_create_database_propagation to on; +drop database test_db; + +--------------------------------------------------------------------------- +-- rollbacks public role database privileges to original state +grant connect,temp,temporary on database test_2pc_db to public; +drop database test_2pc_db; +set citus.enable_create_database_propagation to off; +DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE; + +reset citus.enable_create_database_propagation; +reset search_path; +--------------------------------------------------------------------------- diff --git a/src/test/regress/sql/grant_role_from_non_maindb.sql b/src/test/regress/sql/grant_role_from_non_maindb.sql new file mode 100644 index 000000000..b74b5092d --- /dev/null +++ b/src/test/regress/sql/grant_role_from_non_maindb.sql @@ -0,0 +1,147 @@ +CREATE SCHEMA grant_role2pc; +SET search_path TO grant_role2pc; +set citus.enable_create_database_propagation to on; + +CREATE DATABASE grant_role2pc_db; + +\c grant_role2pc_db +SHOW citus.main_db; + +SET citus.superuser TO 'postgres'; +CREATE USER grant_role2pc_user1; +CREATE USER grant_role2pc_user2; +CREATE USER 
grant_role2pc_user3; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +CREATE USER grant_role2pc_user6; +CREATE USER grant_role2pc_user7; + +\c grant_role2pc_db + +--test with empty superuser +SET citus.superuser TO ''; +grant grant_role2pc_user1 to grant_role2pc_user2; + +SET citus.superuser TO 'postgres'; +grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER; + +\c regression + +select result FROM run_command_on_all_nodes( + $$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text = 'grant_role2pc_user2' + order by member::regrole::text, roleid::regrole::text + ) t + $$ +); + +\c grant_role2pc_db +--test grant under transactional context with multiple operations +BEGIN; +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3 ; +COMMIT; + +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3; +grant grant_role2pc_user1 to grant_role2pc_user6; +ROLLBACK; + + + +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user7; +SELECT 1/0; +commit; + + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + + +\c grant_role2pc_db + +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +\c grant_role2pc_db +revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3; + +--test revoke under transactional context with multiple operations +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3 ; +revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3; +COMMIT; +\c grant_role2pc_db - - :worker_1_port +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +revoke grant_role2pc_user1 from grant_role2pc_user3 cascade; +COMMIT; + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +\c grant_role2pc_db - - :worker_1_port +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION; +grant grant_role2pc_user1 to 
grant_role2pc_user6; +COMMIT; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6; + +--clean resources +DROP SCHEMA grant_role2pc; +set citus.enable_create_database_propagation to on; +DROP DATABASE grant_role2pc_db; +drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7; +drop user grant_role2pc_user1; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/issue_7477.sql b/src/test/regress/sql/issue_7477.sql new file mode 100644 index 000000000..b9c1578e9 --- /dev/null +++ b/src/test/regress/sql/issue_7477.sql @@ -0,0 +1,44 @@ + +--- Test for updating a table that has a foreign key reference to another reference table. +--- Issue #7477: Distributed deadlock after issuing a simple UPDATE statement +--- https://github.com/citusdata/citus/issues/7477 + +CREATE TABLE table1 (id INT PRIMARY KEY); +SELECT create_reference_table('table1'); +INSERT INTO table1 VALUES (1); + +CREATE TABLE table2 ( + id INT, + info TEXT, + CONSTRAINT table1_id_fk FOREIGN KEY (id) REFERENCES table1 (id) + ); +SELECT create_reference_table('table2'); +INSERT INTO table2 VALUES (1, 'test'); + +--- Runs the update command in parallel on workers. +--- Due to bug #7477, before the fix, the result is non-deterministic +--- and have several rows of the form: +--- localhost | 57638 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: canceling the transaction since it was involved in a distributed deadlock + +SELECT * FROM master_run_on_worker( + ARRAY['localhost', 'localhost','localhost', 'localhost','localhost', + 'localhost','localhost', 'localhost','localhost', 'localhost']::text[], + ARRAY[57638, 57637, 57637, 57638, 57637, 57638, 57637, 57638, 57638, 57637]::int[], + ARRAY['UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1' + ]::text[], + true); + +--- cleanup +DROP TABLE table2; +DROP TABLE table1; diff --git a/src/test/regress/sql/issue_7705.sql b/src/test/regress/sql/issue_7705.sql new file mode 100644 index 000000000..950933017 --- /dev/null +++ b/src/test/regress/sql/issue_7705.sql @@ -0,0 +1,72 @@ +--- Test for verifying that column references (var nodes) in targets that cannot be pushed down +--- do not cause issues for the postgres planner, in particular postgres versions 16+, where the +--- varnullingrels field of a VAR node may contain relids of join relations that can make the var +--- NULL; in a rewritten distributed query without a join such relids do not have a meaning. 
+--- Issue #7705: [SEGFAULT] Querying distributed tables with window partition causes segmentation fault
+--- https://github.com/citusdata/citus/issues/7705

+CREATE SCHEMA issue_7705;
+SET search_path to 'issue_7705';
+SET citus.next_shard_id TO 30070000;
+SET citus.shard_replication_factor TO 1;
+SET citus.enable_local_execution TO ON;
+
+CREATE TABLE t1 (id INT PRIMARY KEY);
+INSERT INTO t1 VALUES (1), (2);
+
+CREATE TABLE t2 (id INT, account_id INT, a2 INT, PRIMARY KEY(id, account_id));
+INSERT INTO t2 VALUES (3, 1, 10), (4, 2, 20), (5, 1, NULL);
+
+SELECT create_distributed_table('t1', 'id');
+SELECT create_distributed_table('t2', 'account_id');
+
+-- Test the issue seen in #7705; a target expression with
+-- a window function that cannot be pushed down because the
+-- PARTITION BY is not on the distribution column, also includes
+-- a column from the inner side of a left outer join, which
+-- produces a non-empty varnullingrels set in PG 16 (and higher)
+SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
+FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
+EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
+SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
+FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
+
+SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
+FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id;
+EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
+SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
+FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id;
+
+SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
+FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
+EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
+SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id)
+FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id;
+
+CREATE SEQUENCE test_seq START 101;
+CREATE OR REPLACE FUNCTION TEST_F(int) returns INT language sql stable as $$ select $1 + 42; $$ ;
+
+-- Issue #7705 also occurs if a target expression includes a column
+-- of a distributed table that is on the inner side of a left outer
+-- join and a call to nextval(), because nextval() cannot be pushed
+-- down, and must be run on the coordinator
+SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int)
+FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
+ORDER BY t1.id;
+EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
+SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int)
+FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
+ORDER BY t1.id;
+
+SELECT t1.id, CASE nextval('test_seq') % 2 = 0 WHEN true THEN t2.a2 ELSE 1 END
+FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
+ORDER BY t1.id;
+EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF)
+SELECT t1.id, CASE nextval('test_seq') %2 = 0 WHEN true THEN t2.a2 ELSE 1 END
+FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id
+ORDER BY t1.id;
+
+--- cleanup
+\set VERBOSITY TERSE
+DROP SCHEMA issue_7705 CASCADE;
+RESET all;
diff --git a/src/test/regress/sql/merge.sql b/src/test/regress/sql/merge.sql
index a41e80841..5316b5233 100644
--- a/src/test/regress/sql/merge.sql
+++ b/src/test/regress/sql/merge.sql
@@ -1206,6 +1206,139 @@ SET citus.log_remote_commands to false;
 SELECT compare_tables();
 ROLLBACK;
+
+-- let's create the source and target tables
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 13000;
+CREATE TABLE source_pushdowntest (id integer);
+CREATE TABLE target_pushdowntest (id integer );
+
+-- let's distribute both tables on the id field
+SELECT create_distributed_table('source_pushdowntest', 'id');
+SELECT create_distributed_table('target_pushdowntest', 'id');
+
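As an illustrative aside (not part of the patch), the hash ranges that these two freshly distributed tables cover can be read straight out of pg_dist_shard; the same kind of lookup is what makes the worker_hash() reasoning further down (for target_table) verifiable:

SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid IN ('source_pushdowntest'::regclass, 'target_pushdowntest'::regclass)
ORDER BY logicalrelid::text, shardminvalue::bigint;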
+-- We are running this on a single-node setup; let's figure out the colocation id of both tables.
+-- Both have the same colocation id, so they are colocated.
+WITH colocations AS (
+    SELECT colocationid
+    FROM pg_dist_partition
+    WHERE logicalrelid = 'source_pushdowntest'::regclass
+       OR logicalrelid = 'target_pushdowntest'::regclass
+)
+SELECT
+    CASE
+        WHEN COUNT(DISTINCT colocationid) = 1 THEN 'Same'
+        ELSE 'Different'
+    END AS colocation_status
+FROM colocations;
+
+SET client_min_messages TO DEBUG1;
+-- Test 1: tables are colocated AND the query is multi-sharded AND the join is on the distribution column: should push down to the workers.
+
+EXPLAIN (costs off, timing off, summary off)
+MERGE INTO target_pushdowntest t
+USING source_pushdowntest s
+ON t.id = s.id
+WHEN NOT MATCHED THEN
+  INSERT (id)
+  VALUES (s.id);
+
+-- Test 2: tables are colocated AND the source query is not multi-sharded: should push down to the worker.
+-- The DEBUG logs show that the query is getting pushed down.
+MERGE INTO target_pushdowntest t
+USING (SELECT * from source_pushdowntest where id = 1) s
+on t.id = s.id
+WHEN NOT MATCHED THEN
+  INSERT (id)
+  VALUES (s.id);
+
+
+-- Test 3: tables are colocated AND the source query is single-sharded, but the insert does not use the source's distribution column: should not push down.
+INSERT INTO source_pushdowntest (id) VALUES (3);
+
+EXPLAIN (costs off, timing off, summary off)
+MERGE INTO target_pushdowntest t
+USING (SELECT 1 as somekey, id from source_pushdowntest where id = 1) s
+on t.id = s.somekey
+WHEN NOT MATCHED THEN
+  INSERT (id)
+  VALUES (s.somekey);
+
+
+-- Let's verify that when we use some other column from the source as the value of the target's distribution column,
+-- the row is inserted into the correct shard of the target.
+CREATE TABLE source_withdata (id integer, some_number integer);
+CREATE TABLE target_table (id integer, name text);
+SELECT create_distributed_table('source_withdata', 'id');
+SELECT create_distributed_table('target_table', 'id');
+
+INSERT INTO source_withdata (id, some_number) VALUES (1, 3);
+
+-- We will use the some_number column from source_withdata as the value of the target's distribution column.
+-- The value of some_number is 3; let's verify which shard it should go to.
+select worker_hash(3);
+
+-- It should go to the second shard of the target, as the target has 4 shards and hash "-28094569" falls in the range of the second shard.
+MERGE INTO target_table t
+USING (SELECT id, some_number from source_withdata where id = 1) s
+on t.id = s.some_number
+WHEN NOT MATCHED THEN
+  INSERT (id, name)
+  VALUES (s.some_number, 'parag');
+
+-- Let's verify that the data was inserted into the second shard of the target.
+EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM target_table;
+
+-- Let's verify the target data too.
+SELECT * FROM target_table;
+
+
+-- Test UPDATE: when the source is single-sharded and the tables are colocated
+MERGE INTO target_table t
+USING (SELECT id, some_number from source_withdata where id = 1) s
+on t.id = s.some_number
+WHEN MATCHED THEN
+  UPDATE SET name = 'parag jain';
+
+-- Let's verify that the data was updated properly.
+SELECT * FROM target_table; + +-- let's see what happend when we try to update distributed key of target table +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + UPDATE SET id = 1500; + +SELECT * FROM target_table; + +-- test DELETE : when source is single sharded and table are colocated +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + DELETE; + +-- let's verify if data deleted properly. +SELECT * FROM target_table; + +-- +DELETE FROM source_withdata; +DELETE FROM target_table; +INSERT INTO source VALUES (1,1); + +merge into target_table sda +using source_withdata sdn +on sda.id = sdn.id AND sda.id = 1 +when not matched then + insert (id) + values (10000); + +SELECT * FROM target_table WHERE id = 10000; + +RESET client_min_messages; + + + -- This will prune shards with restriction information as NOT MATCHED is void BEGIN; SET citus.log_remote_commands to true; diff --git a/src/test/regress/sql/merge_vcore.sql b/src/test/regress/sql/merge_vcore.sql new file mode 100644 index 000000000..2ab95e874 --- /dev/null +++ b/src/test/regress/sql/merge_vcore.sql @@ -0,0 +1,403 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q +\endif + +-- MERGE command performs a join from data_source to target_table_name +DROP SCHEMA IF EXISTS merge_vcore_schema CASCADE; +--MERGE INTO target +--USING source +--WHEN NOT MATCHED +--WHEN MATCHED AND +--WHEN MATCHED + +CREATE SCHEMA merge_vcore_schema; +SET search_path TO merge_vcore_schema; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 4000000; +SET citus.explain_all_tasks TO true; +SET citus.shard_replication_factor TO 1; +SET citus.max_adaptive_executor_pool_size TO 1; +SET client_min_messages = warning; +SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); +RESET client_min_messages; + + +-- ****************************************** CASE 1 : Both are singleSharded*************************************** +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', null, colocate_with=>'none'); +SELECT create_distributed_table('target', null, colocate_with=>'none'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 2 : source is single sharded and target is distributed ******************************* +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + 
+SELECT create_distributed_table('source', null, colocate_with=>'none'); +SELECT create_distributed_table('target', 'id'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + + + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 3 : source is distributed and target is single sharded ******************************* +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', 'id'); +SELECT create_distributed_table('target', null); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 4 : both are distributed ******************************* +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', 'id'); +SELECT create_distributed_table('target', 'id'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 5 : both are distributed & colocated ******************************* + +CREATE TABLE 
source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', 'id'); +SELECT create_distributed_table('target', 'id', colocate_with=>'source'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 6 : both are singlesharded & colocated ******************************* + +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', null); +SELECT create_distributed_table('target', null, colocate_with=>'source'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- Bug Fix Test as part of this PR +-- Test 1 +CREATE TABLE source ( + id int, + age int, + salary int +); + +CREATE TABLE target ( + id int, + age int, + salary int +); + +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); + +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (id, salary) VALUES (source.id, source.salary); + +SELECT * FROM TARGET; +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- Test 2 +CREATE TABLE source ( + id int, + age int, + salary int +); + +CREATE TABLE target ( + id int, + age int, + salary int +); + +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); + +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (salary, id) VALUES (source.salary, source.id); + +SELECT * FROM TARGET; +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- Test 
3 +CREATE TABLE source ( + id int, + age int, + salary int +); + +CREATE TABLE target ( + id int, + age int, + salary int +); + +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); + +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (salary, id, age) VALUES (source.age, source.id, source.salary); + +SELECT * FROM TARGET; +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + + +DROP SCHEMA IF EXISTS merge_vcore_schema CASCADE; + + + + diff --git a/src/test/regress/sql/metadata_sync_from_non_maindb.sql b/src/test/regress/sql/metadata_sync_from_non_maindb.sql new file mode 100644 index 000000000..62760c6cc --- /dev/null +++ b/src/test/regress/sql/metadata_sync_from_non_maindb.sql @@ -0,0 +1,188 @@ +CREATE SCHEMA metadata_sync_2pc_schema; +SET search_path TO metadata_sync_2pc_schema; +set citus.enable_create_database_propagation to on; +CREATE DATABASE metadata_sync_2pc_db; + +revoke connect,temp,temporary on database metadata_sync_2pc_db from public; + +\c metadata_sync_2pc_db +SHOW citus.main_db; + +CREATE USER "grant_role2pc'_user1"; +CREATE USER "grant_role2pc'_user2"; +CREATE USER "grant_role2pc'_user3"; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; + +\c regression +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db +grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; +-- This section was originally testing a scenario where a user with the 'admin option' grants the same role to another user, also with the 'admin option'. +-- However, we encountered inconsistent errors because the 'admin option' grant is executed after the grant below. +-- Once we establish the correct order of granting, we will reintroduce the 'granted by' clause. +-- For now, we are commenting out the grant below that includes 'granted by', and instead, we are adding a grant without the 'granted by' clause. 
+-- grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5; + +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; + +\c regression +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + +-- test for security label on role +\c metadata_sync_2pc_db - - :master_port +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE grant_role2pc_user4 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "grant_role2pc'_user1" IS 'citus_classified'; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + +set citus.enable_create_database_propagation to on; +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + +\c metadata_sync_2pc_db +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; + +revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; + +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; + +\c regression + +drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; +--test for user operations + +--test for create user +\c regression - - :master_port +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db - - :master_port +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; + +\c metadata_sync_2pc_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION 
BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; + +create role test_role3; + +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +--test for alter user +select 1 from citus_remove_node('localhost', :worker_2_port); +\c metadata_sync_2pc_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; + +\c metadata_sync_2pc_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; + +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +--test for drop user +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db - - :worker_1_port +DROP ROLE test_role1, "test_role2-needs\!escape"; + +\c metadata_sync_2pc_db - - :master_port +DROP ROLE test_role3; + +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +-- Clean up: drop the database on worker node 2 +\c regression - - :worker_2_port +DROP ROLE if exists test_role1, "test_role2-needs\!escape", test_role3; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +set citus.enable_create_database_propagation to on; +drop database metadata_sync_2pc_db; +drop schema metadata_sync_2pc_schema; +reset citus.enable_create_database_propagation; +reset search_path; diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index d04fc96c3..dae331d25 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -16,7 +16,7 @@ CREATE TABLE test(col_1 int); -- not in a distributed transaction SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 
'col_1', 0, 's'); -SELECT citus_internal_update_relation_colocation ('test'::regclass, 1); +SELECT citus_internal.update_relation_colocation ('test'::regclass, 1); -- in a distributed transaction, but the application name is not Citus BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -54,7 +54,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); + SELECT citus_internal.update_relation_colocation ('test'::regclass, 10); ROLLBACK; -- finally, a user can only add its own tables to the metadata @@ -202,7 +202,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); + SELECT citus_internal.update_placement_metadata(1420007, 10000, 11111); ROLLBACK; -- non-existing users should fail to pass the checks @@ -276,7 +276,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- the user is only allowed to add a shard for add a table which is in pg_dist_partition @@ -286,7 +286,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- ok, now add the table to the pg_dist_partition @@ -302,7 +302,7 @@ COMMIT; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); + SELECT citus_internal.update_relation_colocation ('test_2'::regclass, 1231231232); ROLLBACK; -- invalid shard ids are not allowed @@ -312,7 +312,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- invalid storage types are not allowed @@ -322,7 +322,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, 
shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables @@ -332,7 +332,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- non-integer shard ranges are not allowed @@ -342,7 +342,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- shardMinValue should be smaller than shardMaxValue @@ -352,7 +352,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- we do not allow overlapping shards for the same table @@ -364,7 +364,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text), ('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- Now let's check valid pg_dist_object updates @@ -482,7 +482,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- we do not allow NULL shardMinMax values @@ -494,12 +494,12 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH 
shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -- manually ingest NULL values, otherwise not likely unless metadata is corrupted UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 1420000; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; \c - metadata_sync_helper_role - :worker_1_port @@ -518,14 +518,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_2'::regclass, 1420004::bigint, 't'::"char", '51'::text, '60'::text), ('test_2'::regclass, 1420005::bigint, 't'::"char", '61'::text, '70'::text), ('test_3'::regclass, 1420008::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; COMMIT; -- we cannot mark these two tables colocated because they are not colocated BEGIN; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- now, add few more shards for test_3 to make it colocated with test_2 @@ -539,7 +539,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_3'::regclass, 1420011::bigint, 't'::"char", '41'::text, '50'::text), ('test_3'::regclass, 1420012::bigint, 't'::"char", '51'::text, '60'::text), ('test_3'::regclass, 1420013::bigint, 't'::"char", '61'::text, '70'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; COMMIT; -- shardMin/MaxValues should be NULL for reference tables @@ -549,7 +549,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- reference tables cannot have multiple shards @@ -560,7 +560,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), ('test_ref'::regclass, 1420007::bigint, 't'::"char", NULL, NULL)) - 
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- finally, add a shard for reference tables @@ -570,7 +570,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; COMMIT; \c - postgres - :worker_1_port @@ -583,7 +583,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; COMMIT; \c - metadata_sync_helper_role - :worker_1_port @@ -596,9 +596,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS - (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) - SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS + (VALUES (-10, 0::bigint, 1::int, 1500000::bigint)) + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- invalid placementid @@ -608,7 +608,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, 1::int, -10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- non-existing shard @@ -618,7 +618,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1430100, 0::bigint, 1::int, 10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- non-existing node with non-existing node-id 123123123 @@ -628,7 +628,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES ( 1420000, 0::bigint, 123123123::int, 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM 
placement_data; ROLLBACK; -- create a volatile function that returns the local node id @@ -655,7 +655,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, get_node_id(), 1500000), (1420000, 0::bigint, get_node_id(), 1500001)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- shard is not owned by us @@ -665,7 +665,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420007, 0::bigint, get_node_id(), 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- sucessfully add placements @@ -686,14 +686,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1420011, 0::bigint, get_node_id(), 1500009), (1420012, 0::bigint, get_node_id(), 1500010), (1420013, 0::bigint, get_node_id(), 1500011)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; COMMIT; -- we should be able to colocate both tables now BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- try to update placements @@ -703,7 +703,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); COMMIT; -- fails because the source node doesn't contain the shard @@ -711,7 +711,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); COMMIT; -- fails because shard does not exist @@ -719,7 +719,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(0, get_node_id(), get_node_id()+1); COMMIT; -- fails because none-existing shard @@ -727,7 +727,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set 
VERBOSITY terse - SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); COMMIT; -- fails because we do not own the shard @@ -735,7 +735,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(1420007, get_node_id(), get_node_id()+1); COMMIT; -- the user only allowed to delete their own shards @@ -745,7 +745,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ROLLBACK; -- the user cannot delete non-existing shards @@ -755,7 +755,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ROLLBACK; @@ -770,7 +770,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; SELECT count(*) FROM pg_dist_shard WHERE shardid = 1420000; SELECT count(*) FROM pg_dist_placement WHERE shardid = 1420000; @@ -788,7 +788,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; @@ -810,7 +810,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WHERE logicalrelid = 'test_2'::regclass; \endif - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -820,7 +820,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -830,7 +830,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- colocated hash distributed table should have the same dist key columns diff --git a/src/test/regress/sql/multi_limit_clause.sql b/src/test/regress/sql/multi_limit_clause.sql index 8d14bbbc8..5e3b3e3de 100644 --- a/src/test/regress/sql/multi_limit_clause.sql +++ 
b/src/test/regress/sql/multi_limit_clause.sql @@ -222,6 +222,25 @@ SELECT ORDER BY 2 DESC, 1 LIMIT 5; +-- check if we can correctly push the limit when it is null +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey LIMIT null; + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET 1 LIMIT null; + +SELECT count(*) FROM lineitem LIMIT null; + +SELECT count(*) FROM lineitem OFFSET 0 LIMIT null; + +-- check if we push the right limit when both offset and limit are given +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET 1 LIMIT 3; + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET null LIMIT 1; + +-- check if we can correctly push the limit when it is all +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 2 LIMIT all; + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 2 OFFSET 2 LIMIT all; + SET client_min_messages TO NOTICE; -- non constants should not push down diff --git a/src/test/regress/sql/multi_mx_hide_shard_names.sql b/src/test/regress/sql/multi_mx_hide_shard_names.sql index e5213a41b..addc7f90e 100644 --- a/src/test/regress/sql/multi_mx_hide_shard_names.sql +++ b/src/test/regress/sql/multi_mx_hide_shard_names.sql @@ -50,6 +50,24 @@ prepare transaction 'take-aggressive-lock'; -- shards are hidden when using psql as application_name SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; +-- Even when using subquery and having no existing quals on pg_clcass +SELECT relname FROM (SELECT relname, relnamespace FROM pg_catalog.pg_class) AS q WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + +-- Check that inserts into pg_class don't add the filter +EXPLAIN (COSTS OFF) INSERT INTO pg_class VALUES (1); +-- Unless it's an INSERT SELECT that queries from pg_class; +EXPLAIN (COSTS OFF) INSERT INTO pg_class SELECT * FROM pg_class; + +-- Check that query that psql "\d test_table" does gets optimized to an index +-- scan +EXPLAIN (COSTS OFF) SELECT c.oid, + n.nspname, + c.relname +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relname OPERATOR(pg_catalog.~) '^(test_table)$' COLLATE pg_catalog.default + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 2, 3; commit prepared 'take-aggressive-lock'; diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql index e67b782a5..7d218361c 100644 --- a/src/test/regress/sql/multi_test_helpers.sql +++ b/src/test/regress/sql/multi_test_helpers.sql @@ -652,3 +652,17 @@ BEGIN JOIN pg_dist_node USING (nodeid); END; $func$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION check_database_privileges(role_name text, db_name text, permissions text[]) +RETURNS TABLE(permission text, result text) +AS $func$ +DECLARE + permission text; +BEGIN + FOREACH permission IN ARRAY permissions + LOOP + RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$, + quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission)); + END LOOP; +END; +$func$ LANGUAGE plpgsql; diff --git a/src/test/regress/sql/node_conninfo_reload.sql b/src/test/regress/sql/node_conninfo_reload.sql index 42ba8c9b1..2faaaeeb1 100644 --- a/src/test/regress/sql/node_conninfo_reload.sql +++ b/src/test/regress/sql/node_conninfo_reload.sql @@ -205,4 +205,30 @@ show citus.node_conninfo; -- Should work 
again ALTER TABLE test ADD COLUMN e INT; +-- show that we allow providing "host" param via citus.node_conninfo +ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require host=nosuchhost'; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); + +-- fails due to invalid host +SELECT COUNT(*)>=0 FROM test; + +SELECT array_agg(nodeid) as updated_nodeids from pg_dist_node WHERE nodename = 'localhost' \gset +UPDATE pg_dist_node SET nodename = '127.0.0.1' WHERE nodeid = ANY(:'updated_nodeids'::int[]); + +ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require host=localhost'; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); + +-- works when hostaddr is specified in pg_dist_node after providing host in citus.node_conninfo +SELECT COUNT(*)>=0 FROM test; + +-- restore original nodenames into pg_dist_node +UPDATE pg_dist_node SET nodename = 'localhost' WHERE nodeid = ANY(:'updated_nodeids'::int[]); + +-- reset it +ALTER SYSTEM RESET citus.node_conninfo; +select pg_reload_conf(); +select pg_sleep(0.1); -- wait for config reload to apply + DROP SCHEMA node_conninfo_reload CASCADE; diff --git a/src/test/regress/sql/other_databases.sql b/src/test/regress/sql/other_databases.sql index 8cd54f354..aa936e507 100644 --- a/src/test/regress/sql/other_databases.sql +++ b/src/test/regress/sql/other_databases.sql @@ -75,9 +75,9 @@ DROP USER other_db_user9, nonsuperuser; -- test from a worker \c - - - :worker_1_port -CREATE DATABASE other_db2; +CREATE DATABASE worker_other_db; -\c other_db2 +\c worker_other_db CREATE USER worker_user1; @@ -98,9 +98,85 @@ SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; -- some user creation commands will fail but let's make sure we try to drop them just in case DROP USER IF EXISTS worker_user1, worker_user2, worker_user3; -\c - - - :worker_1_port -DROP DATABASE other_db2; +-- test creating and dropping a database from a Citus non-main database +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); +\c other_db1 +CREATE DATABASE other_db3; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + +\c other_db1 +DROP DATABASE other_db3; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + +\c worker_other_db - - :worker_1_port +CREATE DATABASE other_db4; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + +\c worker_other_db +DROP DATABASE other_db4; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + +DROP DATABASE worker_other_db; + +CREATE DATABASE other_db5; + +-- disable create database propagation for the next test +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +\c other_db5 - - :worker_2_port + +-- locally create a database +CREATE DATABASE local_db; + +\c regression - - - + +-- re-enable create database propagation +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +\c other_db5 - - :master_port + +-- Test a scenario where create database fails because the database +-- already 
exists on another node and we don't crash etc. +CREATE DATABASE local_db; + +\c regression - - - + +SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result; + +\c - - - :worker_2_port + +-- locally drop the database for cleanup purposes +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +DROP DATABASE local_db; + +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + \c - - - :master_port +DROP DATABASE other_db5; + +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + DROP SCHEMA other_databases; DROP DATABASE other_db1; diff --git a/src/test/regress/sql/pg16.sql b/src/test/regress/sql/pg16.sql index 99024edcb..0312fcdff 100644 --- a/src/test/regress/sql/pg16.sql +++ b/src/test/regress/sql/pg16.sql @@ -58,8 +58,8 @@ CREATE TABLE tenk1 ( SELECT create_distributed_table('tenk1', 'unique1'); SET citus.log_remote_commands TO on; -EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = 1000; -EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = 1000; +EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = $1; +EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = $1; SET citus.log_remote_commands TO off; -- Proper error when creating statistics without a name on a Citus table diff --git a/src/test/regress/sql/remove_coordinator.sql b/src/test/regress/sql/remove_coordinator.sql index b0df327d1..35a8a5718 100644 --- a/src/test/regress/sql/remove_coordinator.sql +++ b/src/test/regress/sql/remove_coordinator.sql @@ -1,5 +1,41 @@ -- removing coordinator from pg_dist_node should update pg_dist_colocation SELECT master_remove_node('localhost', :master_port); +-- to silence -potentially flaky- "could not establish connection after" warnings in below test +SET client_min_messages TO ERROR; + +-- to fail fast when the hostname is not resolvable, as it will be the case below +SET citus.node_connection_timeout to '1s'; + +BEGIN; + SET application_name TO 'new_app_name'; + + -- that should fail because of bad hostname & port + SELECT citus_add_node('200.200.200.200', 1, 200); + + -- Since above command failed, now Postgres will need to revert the + -- application_name change made in this transaction and this will + -- happen within abort-transaction callback, so we won't be in a + -- transaction block while Postgres does that. + -- + -- And when the application_name changes, Citus tries to re-assign + -- the global pid but it does so only for Citus internal backends, + -- and doing so for Citus internal backends doesn't require being + -- in a transaction block and is safe. + -- + -- However, for the client external backends (like us here), Citus + -- doesn't re-assign the global pid because it's not needed and it's + -- not safe to do so outside of a transaction block. This is because, + -- it would require performing a catalog access to retrive the local + -- node id when the cached local node is invalidated like what just + -- happened here because of the failed citus_add_node() call made + -- above. 
+ -- + -- So by failing here (rather than crashing), we ensure this behavior. +ROLLBACK; + +RESET client_min_messages; +RESET citus.node_connection_timeout; + -- restore coordinator for the rest of the tests SELECT citus_set_coordinator_host('localhost', :master_port); diff --git a/src/test/regress/sql/role_operations_from_non_maindb.sql b/src/test/regress/sql/role_operations_from_non_maindb.sql new file mode 100644 index 000000000..5f569208b --- /dev/null +++ b/src/test/regress/sql/role_operations_from_non_maindb.sql @@ -0,0 +1,106 @@ +-- Create a new database +set citus.enable_create_database_propagation to on; +CREATE DATABASE role_operations_test_db; +SET citus.superuser TO 'postgres'; +-- Connect to the new database +\c role_operations_test_db +-- Test CREATE ROLE with various options +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; + +\c role_operations_test_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_dist_object d + JOIN pg_roles r ON d.objid = r.oid + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape') + order by r.rolname + ) t +$$); + +\c role_operations_test_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; + +\c role_operations_test_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; + +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + +\c role_operations_test_db - - :master_port +-- Test DROP ROLE +DROP ROLE no_such_role; -- fails nicely +DROP ROLE IF EXISTS no_such_role; -- doesn't fail + +CREATE ROLE new_role; +DROP ROLE IF EXISTS no_such_role, new_role; -- doesn't fail +DROP ROLE IF EXISTS test_role1, "test_role2-needs\!escape"; + +\c regression - - :master_port +--verify that roles and dist_object are dropped +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + ORDER BY rolname + ) t +$$); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT 
r.rolname + FROM pg_roles r + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + order by r.rolname + ) t +$$); + +SELECT result FROM run_command_on_all_nodes($$ + SELECT count(*) leaked_pg_dist_object_records_for_roles + FROM pg_dist_object LEFT JOIN pg_authid ON (objid = oid) + WHERE classid = 1260 AND oid IS NULL +$$); + +-- Clean up: drop the database +set citus.enable_create_database_propagation to on; +DROP DATABASE role_operations_test_db; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/schema_based_sharding.sql b/src/test/regress/sql/schema_based_sharding.sql index bd8065ab9..f0b2276df 100644 --- a/src/test/regress/sql/schema_based_sharding.sql +++ b/src/test/regress/sql/schema_based_sharding.sql @@ -12,15 +12,15 @@ SET client_min_messages TO NOTICE; -- Verify that the UDFs used to sync tenant schema metadata to workers -- fail on NULL input. -SELECT citus_internal_add_tenant_schema(NULL, 1); -SELECT citus_internal_add_tenant_schema(1, NULL); -SELECT citus_internal_delete_tenant_schema(NULL); -SELECT citus_internal_unregister_tenant_schema_globally(1, NULL); -SELECT citus_internal_unregister_tenant_schema_globally(NULL, 'text'); +SELECT citus_internal.add_tenant_schema(NULL, 1); +SELECT citus_internal.add_tenant_schema(1, NULL); +SELECT citus_internal.delete_tenant_schema(NULL); +SELECT citus_internal.unregister_tenant_schema_globally(1, NULL); +SELECT citus_internal.unregister_tenant_schema_globally(NULL, 'text'); --- Verify that citus_internal_unregister_tenant_schema_globally can only +-- Verify that citus_internal.unregister_tenant_schema_globally can only -- be called on schemas that are dropped already. -SELECT citus_internal_unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); +SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); @@ -1022,9 +1022,9 @@ SELECT pg_reload_conf(); ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON; SELECT pg_reload_conf(); --- Verify that citus_internal_unregister_tenant_schema_globally is a no-op +-- Verify that citus_internal.unregister_tenant_schema_globally is a no-op -- on workers. 
-SELECT citus_internal_unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); +SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); \c - - - :master_port diff --git a/src/test/regress/sql/seclabel.sql b/src/test/regress/sql/seclabel.sql index e523fc1da..d39e01183 100644 --- a/src/test/regress/sql/seclabel.sql +++ b/src/test/regress/sql/seclabel.sql @@ -62,14 +62,20 @@ SET citus.grep_remote_commands = '%SECURITY LABEL%'; SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; SECURITY LABEL ON ROLE user1 IS NULL; SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; \c - - - :worker_1_port --- command not allowed from worker node -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified'; +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +-- command from the worker node should be propagated to the coordinator +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; -\c - - - :master_port RESET citus.log_remote_commands; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; +\c - - - :master_port SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; @@ -82,6 +88,19 @@ SELECT 1 FROM citus_add_node('localhost', :worker_2_port); SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; +-- disable the GUC and check that the command is not propagated +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +\c - - - :worker_2_port +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +RESET citus.enable_alter_role_propagation; + -- cleanup RESET citus.log_remote_commands; DROP ROLE user1, "user 2"; diff --git a/src/test/regress/sql/seclabel_non_maindb.sql b/src/test/regress/sql/seclabel_non_maindb.sql new file mode 100644 index 000000000..1833d4193 --- /dev/null +++ b/src/test/regress/sql/seclabel_non_maindb.sql @@ -0,0 +1,71 @@ +-- SECLABEL +-- +-- Test suite for running SECURITY LABEL ON ROLE statements from non-main databases + +SET citus.enable_create_database_propagation to ON; + +CREATE DATABASE database1; +CREATE DATABASE database2; + +\c - - - :worker_1_port +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database_w1; + + +\c - - - :master_port +CREATE ROLE user1; +\c database1 +SHOW citus.main_db; 
+SHOW citus.superuser; + +CREATE ROLE "user 2"; + +-- Set a SECURITY LABEL on a role from a non-main database +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_unclassified'; + +-- Check the result +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +\c database1 +-- Set a SECURITY LABEL on a database; it should not be propagated +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database1 IS 'citus_classified'; + +-- Set a SECURITY LABEL on a table; it should not be propagated +CREATE TABLE a (i int); +SECURITY LABEL ON TABLE a IS 'citus_classified'; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database1') ORDER BY node_type; + +-- Check that only the SECURITY LABEL for ROLES is propagated to the non-main databases on other nodes +\c database_w1 - - :worker_1_port +SELECT provider, objtype, label, objname FROM pg_seclabels ORDER BY objname; + + +-- Check the result after a transaction +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database_w1 IS 'citus_classified'; +COMMIT; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database_w1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +ROLLBACK; + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +-- clean up +SET citus.enable_create_database_propagation to ON; +DROP DATABASE database1; +DROP DATABASE database2; +DROP DATABASE database_w1; +DROP ROLE user1; +DROP ROLE "user 2"; +RESET citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql index 5d8e89b36..9037f8f75 100644 --- a/src/test/regress/sql/shard_rebalancer.sql +++ b/src/test/regress/sql/shard_rebalancer.sql @@ -1340,6 +1340,43 @@ DROP TABLE t1, r1, r2; -- test suites should clean up their distributed tables. SELECT count(*) FROM pg_dist_partition; +-- verify that, on a system with a newly added node, distributed table shards are not copied to the new node before its reference tables are + +SELECT 1 from master_remove_node('localhost', :worker_2_port); +SELECT public.wait_until_metadata_sync(30000); + +CREATE TABLE r1 (a int PRIMARY KEY, b int); +SELECT create_reference_table('r1'); + +CREATE TABLE d1 (a int PRIMARY KEY, b int); +SELECT create_distributed_table('d1', 'a'); + +ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 15; +SELECT 1 from master_add_node('localhost', :worker_2_port); + +-- count the number of placements for the reference table to verify it is not available on +-- all nodes +SELECT count(*) +FROM pg_dist_shard +JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'r1'::regclass; + +-- #7426 We can't move shards to the fresh node before we copy reference tables there.
+-- rebalance_table_shards() will do the copy, but the low-level +-- citus_move_shard_placement() should raise an error +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; + +SELECT replicate_reference_tables(); + +-- After replication, the move should succeed. +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; + +DROP TABLE d1, r1; + -- verify a system having only reference tables will copy the reference tables when -- executing the rebalancer diff --git a/src/test/regress/sql/system_queries.sql b/src/test/regress/sql/system_queries.sql new file mode 100644 index 000000000..1e1d86876 --- /dev/null +++ b/src/test/regress/sql/system_queries.sql @@ -0,0 +1,27 @@ +-- The following query retrieves the foreign key constraints of the table "pg_dist_background_job" +-- along with their details. It used to hit a null pointer dereference in the "HasRangeTableRef" +-- function of "worker_shard_visibility"; the issue was fixed with PR #7604. +select + ct.conname as constraint_name, + a.attname as column_name, + fc.relname as foreign_table_name, + fns.nspname as foreign_table_schema +from + (SELECT ct.conname, ct.conrelid, ct.confrelid, ct.conkey, ct.contype, +ct.confkey, generate_subscripts(ct.conkey, 1) AS s + FROM pg_constraint ct + ) AS ct + inner join pg_class c on c.oid=ct.conrelid + inner join pg_namespace ns on c.relnamespace=ns.oid + inner join pg_attribute a on a.attrelid=ct.conrelid and a.attnum = +ct.conkey[ct.s] + left join pg_class fc on fc.oid=ct.confrelid + left join pg_namespace fns on fc.relnamespace=fns.oid + left join pg_attribute fa on fa.attrelid=ct.confrelid and fa.attnum = +ct.confkey[ct.s] +where + ct.contype='f' + and fc.relname='pg_dist_background_job' + and ns.nspname='pg_catalog' +order by + fns.nspname, fc.relname, a.attnum; diff --git a/src/test/regress/sql/upgrade_post_11_after.sql b/src/test/regress/sql/upgrade_post_11_after.sql index ba9b12f3b..6d948ec34 100644 --- a/src/test/regress/sql/upgrade_post_11_after.sql +++ b/src/test/regress/sql/upgrade_post_11_after.sql @@ -27,6 +27,21 @@ SET datestyle = "ISO, YMD"; SELECT 1 FROM run_command_on_workers($$ALTER SYSTEM SET datestyle = "ISO, YMD";$$); SELECT 1 FROM run_command_on_workers($$SELECT pg_reload_conf()$$); +-- In the version that we use for upgrade tests (v10.2.0), we propagate +-- "valid until" to the workers as "infinity" even if it's not set. And +-- given that the "postgres" role is created in the older version, "valid until" +-- is set to "infinity" on the workers while this is not the case for the +-- coordinator. See https://github.com/citusdata/citus/issues/7533. +-- +-- We're fixing this for new versions of Citus and we'll probably backport +-- this to some older versions too. However, v10.2.0 won't ever have this +-- fix. +-- +-- For this reason, here we set "valid until" to "infinity" for all the +-- nodes so that the query below doesn't report any difference between the +-- metadata on the coordinator and the workers.
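+-- +-- For illustration only (a minimal sketch; it assumes the usual upgrade-test cluster and uses +-- pg_authid.rolvaliduntil, the catalog column behind "valid until"), the mismatch can be +-- inspected directly before the ALTER ROLE fix-up: +-- SELECT rolvaliduntil FROM pg_authid WHERE rolname = 'postgres'; +-- SELECT result FROM run_command_on_workers($$SELECT rolvaliduntil FROM pg_authid WHERE rolname = 'postgres'$$);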
+ALTER ROLE postgres WITH VALID UNTIL 'infinity'; + -- make sure that the metadata is consistent across all nodes -- we exclude the distributed_object_data as they are -- not sorted in the same order (as OIDs differ on the nodes) diff --git a/src/test/regress/sql/upgrade_rebalance_strategy_before.sql b/src/test/regress/sql/upgrade_rebalance_strategy_before.sql index 458fb9cf6..be2012e9c 100644 --- a/src/test/regress/sql/upgrade_rebalance_strategy_before.sql +++ b/src/test/regress/sql/upgrade_rebalance_strategy_before.sql @@ -29,3 +29,19 @@ SELECT citus_add_rebalance_strategy( 0.3 ); SELECT citus_set_default_rebalance_strategy('custom_strategy'); + +-- Disable the trigger temporarily to allow the invalid strategy to be added. +-- Normally an invalid strategy can only end up in the table when one of the +-- functions it depends on is dropped, but we insert it directly in this test +-- because we want a consistent OID, so we get consistent test output. +ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; +SELECT citus_add_rebalance_strategy( + 'invalid_strategy', + 1234567, + 'capacity_high_worker_1', + 'only_worker_2', + 0.5, + 0.2, + 0.3 + ); +ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger;
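+ +-- For illustration only (a minimal sketch, assuming the usual columns of +-- pg_catalog.pg_dist_rebalance_strategy): the invalid strategy should now be present even +-- though its shard cost function OID (1234567) does not resolve to an existing function. +-- SELECT name, shard_cost_function FROM pg_catalog.pg_dist_rebalance_strategy WHERE name = 'invalid_strategy';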