Merge branch 'main' into contributing-dev

pull/7347/head
Onur Tirtir 2025-02-04 11:54:47 +03:00 committed by GitHub
commit 4e2dbd555e
218 changed files with 9709 additions and 1382 deletions


@@ -3,5 +3,5 @@
\pset border 2
\setenv PAGER 'pspg --no-mouse -bX --no-commandbar --no-topbar'
\set HISTSIZE 100000
\set PROMPT1 '\n%[%033[1m%]%M %n@%/:%>-%p%R%[%033[0m%]%# '
\set PROMPT1 '\n%[%033[1m%]%M %n@%/:%> (PID: %p)%R%[%033[0m%]%# '
\set PROMPT2 ' '


@@ -16,5 +16,25 @@
}
],
},
]
{
"name": "Open core file",
"type": "cppdbg",
"request": "launch",
"program": "/home/citus/.pgenv/pgsql/bin/postgres",
"coreDumpPath": "${input:corefile}",
"cwd": "${workspaceFolder}",
"MIMode": "gdb",
}
],
"inputs": [
{
"id": "corefile",
"type": "command",
"command": "extension.commandvariable.file.pickFile",
"args": {
"dialogTitle": "Select core file",
"include": "**/core*",
},
},
],
}


@@ -68,7 +68,7 @@ USER citus
# build postgres versions separately for effective parallelism and caching of already built versions when changing only certain versions
FROM base AS pg14
RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10
RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.15
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg15
RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5
RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.10
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
RUN rm .pgenv-staging/config/default.conf
FROM base AS pg16
RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1
RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.6
RUN rm .pgenv/src/*.tar*
RUN make -C .pgenv/src/postgresql-*/ clean
RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -152,6 +152,7 @@ RUN sudo apt update \
lsof \
man \
net-tools \
psmisc \
pspg \
tree \
vim \
@@ -210,7 +211,7 @@ COPY --chown=citus:citus .psqlrc .
RUN sudo chown --from=root:root citus:citus -R ~
# sets default pg version
RUN pgenv switch 16.1
RUN pgenv switch 16.6
# make connecting to the coordinator easy
ENV PGPORT=9700


@@ -2,8 +2,11 @@
"image": "ghcr.io/citusdata/citus-devcontainer:main",
"runArgs": [
"--cap-add=SYS_PTRACE",
"--ulimit=core=-1",
],
"forwardPorts": [
9700
],
"forwardPorts": [9700],
"customizations": {
"vscode": {
"extensions": [
@@ -14,6 +17,7 @@
"github.vscode-pull-request-github",
"ms-vscode.cpptools-extension-pack",
"ms-vsliveshare.vsliveshare",
"rioj7.command-variable",
],
"settings": {
"files.exclude": {
@@ -30,3 +34,4 @@
"updateContentCommand": "./configure",
"postCreateCommand": "make -C .devcontainer/",
}


@@ -5,7 +5,7 @@ verify_ssl = true
[packages]
mitmproxy = {editable = true, ref = "main", git = "https://github.com/citusdata/mitmproxy.git"}
construct = "==2.9.45"
construct = "*"
docopt = "==0.6.2"
cryptography = ">=41.0.4"
pytest = "*"
@@ -16,6 +16,7 @@ pytest-timeout = "*"
pytest-xdist = "*"
pytest-repeat = "*"
pyyaml = "*"
werkzeug = "==2.3.7"
[dev-packages]
black = "*"


@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "b92bf682aeeea1a66a16beaf78584a5318fd0ae908ce85c7e2a4807aa2bee532"
"sha256": "f8db86383082539f626f1402e720f5f2e3f9718b44a8f26110cf9f52e7ca46bc"
},
"pipfile-spec": 6,
"requires": {
@@ -119,11 +119,11 @@
},
"certifi": {
"hashes": [
"sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082",
"sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"
"sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f",
"sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"
],
"markers": "python_version >= '3.6'",
"version": "==2023.7.22"
"version": "==2024.2.2"
},
"cffi": {
"hashes": [
@@ -180,7 +180,7 @@
"sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956",
"sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"
],
"markers": "python_version >= '3.8'",
"markers": "platform_python_implementation != 'PyPy'",
"version": "==1.16.0"
},
"click": {
@@ -193,40 +193,51 @@
},
"construct": {
"hashes": [
"sha256:2271a0efd0798679dea825ff47e22a4c550456a5db0ba8baa82f7eae0af0118c"
"sha256:4d2472f9684731e58cc9c56c463be63baa1447d674e0d66aeb5627b22f512c29",
"sha256:c80be81ef595a1a821ec69dc16099550ed22197615f4320b57cc9ce2a672cb30"
],
"index": "pypi",
"version": "==2.9.45"
"markers": "python_version >= '3.6'",
"version": "==2.10.70"
},
"cryptography": {
"hashes": [
"sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67",
"sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311",
"sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8",
"sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13",
"sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143",
"sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f",
"sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829",
"sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd",
"sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397",
"sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac",
"sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d",
"sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a",
"sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839",
"sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e",
"sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6",
"sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9",
"sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860",
"sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca",
"sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91",
"sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d",
"sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714",
"sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb",
"sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"
"sha256:04859aa7f12c2b5f7e22d25198ddd537391f1695df7057c8700f71f26f47a129",
"sha256:069d2ce9be5526a44093a0991c450fe9906cdf069e0e7cd67d9dee49a62b9ebe",
"sha256:0d3ec384058b642f7fb7e7bff9664030011ed1af8f852540c76a1317a9dd0d20",
"sha256:0fab2a5c479b360e5e0ea9f654bcebb535e3aa1e493a715b13244f4e07ea8eec",
"sha256:0fea01527d4fb22ffe38cd98951c9044400f6eff4788cf52ae116e27d30a1ba3",
"sha256:1b797099d221df7cce5ff2a1d272761d1554ddf9a987d3e11f6459b38cd300fd",
"sha256:1e935c2900fb53d31f491c0de04f41110351377be19d83d908c1fd502ae8daa5",
"sha256:20100c22b298c9eaebe4f0b9032ea97186ac2555f426c3e70670f2517989543b",
"sha256:20180da1b508f4aefc101cebc14c57043a02b355d1a652b6e8e537967f1e1b46",
"sha256:25b09b73db78facdfd7dd0fa77a3f19e94896197c86e9f6dc16bce7b37a96504",
"sha256:2619487f37da18d6826e27854a7f9d4d013c51eafb066c80d09c63cf24505306",
"sha256:2eb6368d5327d6455f20327fb6159b97538820355ec00f8cc9464d617caecead",
"sha256:35772a6cffd1f59b85cb670f12faba05513446f80352fe811689b4e439b5d89e",
"sha256:39d5c93e95bcbc4c06313fc6a500cee414ee39b616b55320c1904760ad686938",
"sha256:3d96ea47ce6d0055d5b97e761d37b4e84195485cb5a38401be341fabf23bc32a",
"sha256:4dcab7c25e48fc09a73c3e463d09ac902a932a0f8d0c568238b3696d06bf377b",
"sha256:5fbf0f3f0fac7c089308bd771d2c6c7b7d53ae909dce1db52d8e921f6c19bb3a",
"sha256:6c25e1e9c2ce682d01fc5e2dde6598f7313027343bd14f4049b82ad0402e52cd",
"sha256:762f3771ae40e111d78d77cbe9c1035e886ac04a234d3ee0856bf4ecb3749d54",
"sha256:90147dad8c22d64b2ff7331f8d4cddfdc3ee93e4879796f837bdbb2a0b141e0c",
"sha256:935cca25d35dda9e7bd46a24831dfd255307c55a07ff38fd1a92119cffc34857",
"sha256:93fbee08c48e63d5d1b39ab56fd3fdd02e6c2431c3da0f4edaf54954744c718f",
"sha256:9541c69c62d7446539f2c1c06d7046aef822940d248fa4b8962ff0302862cc1f",
"sha256:c23f03cfd7d9826cdcbad7850de67e18b4654179e01fe9bc623d37c2638eb4ef",
"sha256:c3d1f5a1d403a8e640fa0887e9f7087331abb3f33b0f2207d2cc7f213e4a864c",
"sha256:d1998e545081da0ab276bcb4b33cce85f775adb86a516e8f55b3dac87f469548",
"sha256:d5cf11bc7f0b71fb71af26af396c83dfd3f6eed56d4b6ef95d57867bf1e4ba65",
"sha256:db0480ffbfb1193ac4e1e88239f31314fe4c6cdcf9c0b8712b55414afbf80db4",
"sha256:de4ae486041878dc46e571a4c70ba337ed5233a1344c14a0790c4c4be4bbb8b4",
"sha256:de5086cd475d67113ccb6f9fae6d8fe3ac54a4f9238fd08bfdb07b03d791ff0a",
"sha256:df34312149b495d9d03492ce97471234fd9037aa5ba217c2a6ea890e9166f151",
"sha256:ead69ba488f806fe1b1b4050febafdbf206b81fa476126f3e16110c818bac396"
],
"index": "pypi",
"markers": "python_version >= '3.7'",
"version": "==41.0.4"
"version": "==42.0.3"
},
"docopt": {
"hashes": [
@@ -237,11 +248,11 @@
},
"exceptiongroup": {
"hashes": [
"sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9",
"sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"
"sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14",
"sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"
],
"markers": "python_version < '3.11'",
"version": "==1.1.3"
"version": "==1.2.0"
},
"execnet": {
"hashes": [
@@ -253,12 +264,12 @@
},
"filelock": {
"hashes": [
"sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4",
"sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"
"sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e",
"sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==3.12.4"
"version": "==3.13.1"
},
"flask": {
"hashes": [
@@ -318,11 +329,11 @@
},
"jinja2": {
"hashes": [
"sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852",
"sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"
"sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa",
"sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"
],
"markers": "python_version >= '3.7'",
"version": "==3.1.2"
"version": "==3.1.3"
},
"kaitaistruct": {
"hashes": [
@@ -342,69 +353,69 @@
},
"markupsafe": {
"hashes": [
"sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e",
"sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e",
"sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431",
"sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686",
"sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c",
"sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559",
"sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc",
"sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb",
"sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939",
"sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c",
"sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0",
"sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4",
"sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9",
"sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575",
"sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba",
"sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d",
"sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd",
"sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3",
"sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00",
"sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155",
"sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac",
"sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52",
"sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f",
"sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8",
"sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b",
"sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007",
"sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24",
"sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea",
"sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198",
"sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0",
"sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee",
"sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be",
"sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2",
"sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1",
"sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707",
"sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6",
"sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c",
"sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58",
"sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823",
"sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779",
"sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636",
"sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c",
"sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad",
"sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee",
"sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc",
"sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2",
"sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48",
"sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7",
"sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e",
"sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b",
"sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa",
"sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5",
"sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e",
"sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb",
"sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9",
"sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57",
"sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc",
"sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc",
"sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2",
"sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"
"sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf",
"sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff",
"sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f",
"sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3",
"sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532",
"sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f",
"sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617",
"sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df",
"sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4",
"sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906",
"sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f",
"sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4",
"sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8",
"sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371",
"sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2",
"sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465",
"sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52",
"sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6",
"sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169",
"sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad",
"sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2",
"sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0",
"sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029",
"sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f",
"sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a",
"sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced",
"sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5",
"sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c",
"sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf",
"sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9",
"sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb",
"sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad",
"sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3",
"sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1",
"sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46",
"sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc",
"sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a",
"sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee",
"sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900",
"sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5",
"sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea",
"sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f",
"sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5",
"sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e",
"sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a",
"sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f",
"sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50",
"sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a",
"sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b",
"sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4",
"sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff",
"sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2",
"sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46",
"sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b",
"sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf",
"sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5",
"sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5",
"sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab",
"sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd",
"sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"
],
"markers": "python_version >= '3.7'",
"version": "==2.1.3"
"version": "==2.1.5"
},
"mitmproxy": {
"editable": true,
@@ -491,11 +502,11 @@
},
"pluggy": {
"hashes": [
"sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12",
"sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"
"sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981",
"sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"
],
"markers": "python_version >= '3.8'",
"version": "==1.3.0"
"version": "==1.4.0"
},
"protobuf": {
"hashes": [
@@ -526,12 +537,12 @@
},
"psycopg": {
"hashes": [
"sha256:7542c45810ea16356e5126c9b4291cbc3802aa326fcbba09ff154fe380de29be",
"sha256:cd711edb64b07d7f8a233c365806caf7e55bbe7cbbd8d5c680f672bb5353c8d5"
"sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b",
"sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e"
],
"index": "pypi",
"markers": "python_version >= '3.7'",
"version": "==3.1.11"
"version": "==3.1.18"
},
"publicsuffix2": {
"hashes": [
@@ -542,11 +553,11 @@
},
"pyasn1": {
"hashes": [
"sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57",
"sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"
"sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58",
"sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
"version": "==0.5.0"
"version": "==0.5.1"
},
"pycparser": {
"hashes": [
@@ -557,11 +568,11 @@
},
"pyopenssl": {
"hashes": [
"sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2",
"sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac"
"sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf",
"sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3"
],
"markers": "python_version >= '3.6'",
"version": "==23.2.0"
"markers": "python_version >= '3.7'",
"version": "==24.0.0"
},
"pyparsing": {
"hashes": [
@@ -579,48 +590,48 @@
},
"pytest": {
"hashes": [
"sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002",
"sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069"
"sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c",
"sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6"
],
"index": "pypi",
"markers": "python_version >= '3.7'",
"version": "==7.4.2"
"markers": "python_version >= '3.8'",
"version": "==8.0.0"
},
"pytest-asyncio": {
"hashes": [
"sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d",
"sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"
"sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675",
"sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac"
],
"index": "pypi",
"markers": "python_version >= '3.7'",
"version": "==0.21.1"
"markers": "python_version >= '3.8'",
"version": "==0.23.5"
},
"pytest-repeat": {
"hashes": [
"sha256:4474a7d9e9137f6d8cc8ae297f8c4168d33c56dd740aa78cfffe562557e6b96e",
"sha256:5cd3289745ab3156d43eb9c8e7f7d00a926f3ae5c9cf425bec649b2fe15bad5b"
],
"index": "pypi",
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==0.9.1"
},
"pytest-timeout": {
"hashes": [
"sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9",
"sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6"
],
"index": "pypi",
"markers": "python_version >= '3.6'",
"version": "==2.1.0"
},
"pytest-xdist": {
"hashes": [
"sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93",
"sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2"
"sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed",
"sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185"
],
"index": "pypi",
"markers": "python_version >= '3.7'",
"version": "==3.3.1"
"version": "==0.9.3"
},
"pytest-timeout": {
"hashes": [
"sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90",
"sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2"
],
"index": "pypi",
"markers": "python_version >= '3.7'",
"version": "==2.2.0"
},
"pytest-xdist": {
"hashes": [
"sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a",
"sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24"
],
"index": "pypi",
"markers": "python_version >= '3.7'",
"version": "==3.5.0"
},
"pyyaml": {
"hashes": [
@@ -653,6 +664,7 @@
"sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4",
"sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba",
"sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8",
"sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef",
"sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5",
"sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd",
"sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3",
@@ -693,36 +705,37 @@
"sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001",
"sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462",
"sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9",
"sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe",
"sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b",
"sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b",
"sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615",
"sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62",
"sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15",
"sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b",
"sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1",
"sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9",
"sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675",
"sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1",
"sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899",
"sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7",
"sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7",
"sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312",
"sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa",
"sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f",
"sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91",
"sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa",
"sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b",
"sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6",
"sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3",
"sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334",
"sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5",
"sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3",
"sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe",
"sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3",
"sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c",
"sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed",
"sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337",
"sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880",
"sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f",
"sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d",
"sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248",
"sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d",
"sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279",
"sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf",
"sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512",
"sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069",
@@ -731,7 +744,6 @@
"sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d",
"sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31",
"sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92",
"sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd",
"sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5",
"sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28",
"sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d",
@@ -760,28 +772,28 @@
},
"tornado": {
"hashes": [
"sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f",
"sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5",
"sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d",
"sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3",
"sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2",
"sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a",
"sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16",
"sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a",
"sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17",
"sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0",
"sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe"
"sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0",
"sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63",
"sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263",
"sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052",
"sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f",
"sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee",
"sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78",
"sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579",
"sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212",
"sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e",
"sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"
],
"markers": "python_version >= '3.8'",
"version": "==6.3.3"
"version": "==6.4"
},
"typing-extensions": {
"hashes": [
"sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0",
"sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"
"sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783",
"sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"
],
"markers": "python_version >= '3.8'",
"version": "==4.8.0"
"version": "==4.9.0"
},
"urwid": {
"hashes": [
@@ -791,12 +803,12 @@
},
"werkzeug": {
"hashes": [
"sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc",
"sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"
"sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8",
"sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==3.0.1"
"version": "==2.3.7"
},
"wsproto": {
"hashes": [
@@ -864,40 +876,40 @@
"develop": {
"attrs": {
"hashes": [
"sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04",
"sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"
"sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30",
"sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"
],
"markers": "python_version >= '3.7'",
"version": "==23.1.0"
"version": "==23.2.0"
},
"black": {
"hashes": [
"sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f",
"sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7",
"sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100",
"sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573",
"sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d",
"sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f",
"sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9",
"sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300",
"sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948",
"sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325",
"sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9",
"sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71",
"sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186",
"sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f",
"sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe",
"sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855",
"sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80",
"sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393",
"sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c",
"sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204",
"sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377",
"sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301"
"sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8",
"sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8",
"sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd",
"sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9",
"sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31",
"sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92",
"sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f",
"sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29",
"sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4",
"sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693",
"sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218",
"sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a",
"sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23",
"sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0",
"sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982",
"sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894",
"sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540",
"sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430",
"sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b",
"sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2",
"sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6",
"sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==23.9.1"
"version": "==24.2.0"
},
"click": {
"hashes": [
@@ -909,30 +921,30 @@
},
"flake8": {
"hashes": [
"sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23",
"sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"
"sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132",
"sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"
],
"index": "pypi",
"markers": "python_full_version >= '3.8.1'",
"version": "==6.1.0"
"version": "==7.0.0"
},
"flake8-bugbear": {
"hashes": [
"sha256:90cf04b19ca02a682feb5aac67cae8de742af70538590509941ab10ae8351f71",
"sha256:b182cf96ea8f7a8595b2f87321d7d9b28728f4d9c3318012d896543d19742cb5"
"sha256:663ef5de80cd32aacd39d362212983bc4636435a6f83700b4ed35acbd0b7d1b8",
"sha256:f9cb5f2a9e792dd80ff68e89a14c12eed8620af8b41a49d823b7a33064ac9658"
],
"index": "pypi",
"markers": "python_full_version >= '3.8.1'",
"version": "==23.9.16"
"version": "==24.2.6"
},
"isort": {
"hashes": [
"sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504",
"sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"
"sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109",
"sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"
],
"index": "pypi",
"markers": "python_full_version >= '3.8.0'",
"version": "==5.12.0"
"version": "==5.13.2"
},
"mccabe": {
"hashes": [
@@ -960,19 +972,19 @@
},
"pathspec": {
"hashes": [
"sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20",
"sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"
"sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
"sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"
],
"markers": "python_version >= '3.7'",
"version": "==0.11.2"
"markers": "python_version >= '3.8'",
"version": "==0.12.1"
},
"platformdirs": {
"hashes": [
"sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3",
"sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"
"sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068",
"sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"
],
"markers": "python_version >= '3.7'",
"version": "==3.11.0"
"markers": "python_version >= '3.8'",
"version": "==4.2.0"
},
"pycodestyle": {
"hashes": [
@@ -984,11 +996,11 @@
},
"pyflakes": {
"hashes": [
"sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774",
"sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"
"sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f",
"sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"
],
"markers": "python_version >= '3.8'",
"version": "==3.1.0"
"version": "==3.2.0"
},
"tomli": {
"hashes": [
@@ -1000,11 +1012,11 @@
},
"typing-extensions": {
"hashes": [
"sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0",
"sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"
"sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783",
"sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"
],
"markers": "python_version >= '3.8'",
"version": "==4.8.0"
"version": "==4.9.0"
}
}
}


@@ -24,27 +24,28 @@ jobs:
runs-on: ubuntu-latest
name: Initialize parameters
outputs:
build_image_name: "citus/extbuilder"
test_image_name: "citus/exttester"
citusupgrade_image_name: "citus/citusupgradetester"
fail_test_image_name: "citus/failtester"
pgupgrade_image_name: "citus/pgupgradetester"
style_checker_image_name: "citus/stylechecker"
build_image_name: "ghcr.io/citusdata/extbuilder"
test_image_name: "ghcr.io/citusdata/exttester"
citusupgrade_image_name: "ghcr.io/citusdata/citusupgradetester"
fail_test_image_name: "ghcr.io/citusdata/failtester"
pgupgrade_image_name: "ghcr.io/citusdata/pgupgradetester"
style_checker_image_name: "ghcr.io/citusdata/stylechecker"
style_checker_tools_version: "0.8.18"
image_suffix: "-v19b671f"
pg14_version: '{ "major": "14", "full": "14.10" }'
pg15_version: '{ "major": "15", "full": "15.5" }'
pg16_version: '{ "major": "16", "full": "16.1" }'
upgrade_pg_versions: "14.10-15.5-16.1"
sql_snapshot_pg_version: "16.6"
image_suffix: "-v5779674"
pg14_version: '{ "major": "14", "full": "14.15" }'
pg15_version: '{ "major": "15", "full": "15.10" }'
pg16_version: '{ "major": "16", "full": "16.6" }'
upgrade_pg_versions: "14.15-15.10-16.6"
steps:
# Since GHA jobs needs at least one step we use a noop step here.
# Since GHA jobs need at least one step we use a noop step here.
- name: Set up parameters
run: echo 'noop'
check-sql-snapshots:
needs: params
runs-on: ubuntu-20.04
container:
image: ${{ needs.params.outputs.build_image_name }}:latest
image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }}
options: --user root
steps:
- uses: actions/checkout@v3.5.0
@@ -61,7 +62,7 @@ jobs:
- name: Check Snapshots
run: |
git config --global --add safe.directory ${GITHUB_WORKSPACE}
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Check C Style
@@ -117,7 +118,7 @@
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
options: --user root
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- name: Expose $PG_MAJOR to Github Env
run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
shell: bash
@@ -227,7 +228,7 @@
- params
- build
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: "./.github/actions/setup_extension"
- name: Run Test
run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }}
@@ -261,7 +262,7 @@
- ${{ needs.params.outputs.pg16_version }}
parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: "./.github/actions/setup_extension"
- name: Test arbitrary configs
run: |-
@@ -311,7 +312,7 @@
old_pg_major: ${{ matrix.old_pg_major }}
new_pg_major: ${{ matrix.new_pg_major }}
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: "./.github/actions/setup_extension"
with:
pg_major: "${{ env.old_pg_major }}"
@@ -349,7 +350,7 @@
- params
- build
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: "./.github/actions/setup_extension"
with:
skip_installation: true
@@ -413,7 +414,7 @@
needs:
- build
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -431,7 +432,7 @@
needs:
- build
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
@@ -450,7 +451,7 @@
outputs:
json: ${{ steps.parallelization.outputs.json }}
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: "./.github/actions/parallelization"
id: parallelization
with:
@@ -463,7 +464,7 @@
outputs:
tests: ${{ steps.detect-regression-tests.outputs.tests }}
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Detect regression tests that need to be run
@@ -514,7 +515,7 @@
fail-fast: false
matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: "./.github/actions/setup_extension"
- name: Run minimal tests
run: |-


@@ -21,7 +21,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v2


@@ -28,7 +28,7 @@
image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
options: --user root
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- name: Configure, Build, and Install
run: |
echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
@@ -46,7 +46,7 @@
outputs:
json: ${{ steps.parallelization.outputs.json }}
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: "./.github/actions/parallelization"
id: parallelization
with:
@@ -67,7 +67,7 @@
fail-fast: false
matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/checkout@v4
- uses: "./.github/actions/setup_extension"
- name: Run minimal tests
run: |-


@@ -19,7 +19,7 @@ jobs:
pg_versions: ${{ steps.get-postgres-versions.outputs.pg_versions }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Get Postgres Versions
@@ -51,18 +51,6 @@
- almalinux-8
- almalinux-9
POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
# Postgres removed support for CentOS 7 in PG 16. Below block is needed to
# keep the build for CentOS 7 working for PG 14 and PG 15.
# Once dependent systems drop support for Centos 7, we can remove this block.
include:
- packaging_docker_image: centos-7
POSTGRES_VERSION: 14
- packaging_docker_image: centos-7
POSTGRES_VERSION: 15
- packaging_docker_image: oraclelinux-7
POSTGRES_VERSION: 14
- packaging_docker_image: oraclelinux-7
POSTGRES_VERSION: 15
container:
image: citus/packaging:${{ matrix.packaging_docker_image }}-pg${{ matrix.POSTGRES_VERSION }}
@@ -70,7 +58,7 @@
steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Set Postgres and python parameters for rpm based distros
run: |
@@ -128,7 +116,6 @@
# for each deb based image and we use POSTGRES_VERSION to set
# PG_CONFIG variable in each of those runs.
packaging_docker_image:
- debian-buster-all
- debian-bookworm-all
- debian-bullseye-all
- ubuntu-focal-all


@@ -1,3 +1,213 @@
### citus v13.0.0 (January 17, 2025) ###
* Adds support for PostgreSQL 17 (#7699, #7661)
* Adds `JSON_TABLE()` support in distributed queries (#7816)
* Propagates `MERGE ... WHEN NOT MATCHED BY SOURCE` (#7807)
* Propagates `MEMORY` and `SERIALIZE` options of `EXPLAIN` (#7802)
* Adds support for identity columns in distributed partitioned tables (#7785)
* Allows specifying an access method for distributed partitioned tables (#7818)
* Allows exclusion constraints on distributed partitioned tables (#7733)
* Allows configuring sslnegotiation using `citus.node_conn_info` (#7821)
* Avoids wal receiver timeouts during large shard splits (#7229)
* Fixes a bug causing incorrect writing of data to target `MERGE` repartition
command (#7659)
* Fixes a crash that happens because of unsafe catalog access when re-assigning
the global pid after `application_name` changes (#7791)
* Fixes incorrect `VALID UNTIL` setting assumption made for roles when syncing
them to new nodes (#7534)
* Fixes segfault when calling distributed procedure with a parameterized
distribution argument (#7242)
* Fixes server crash when trying to execute `activate_node_snapshot()` on a
single-node cluster (#7552)
* Improves `citus_move_shard_placement()` to fail early if there is a new node
without reference tables yet (#7467)
### citus v12.1.6 (Nov 14, 2024) ###
* Propagates `SECURITY LABEL .. ON ROLE` statements (#7304)
* Fixes crash caused by running queries with window partition (#7718)
### citus v12.1.5 (July 17, 2024) ###
* Adds support for MERGE commands with single shard distributed target tables
(#7643)
* Fixes an error with MERGE commands when insert value does not have source
distribution column (#7627)
### citus v12.1.4 (May 28, 2024) ###
* Adds null check for node in HasRangeTableRef (#7604)
### citus v12.1.3 (April 18, 2024) ###
* Allows overwriting host name for all inter-node connections by
supporting "host" parameter in citus.node_conninfo (#7541)
* Changes the order in which the locks are acquired for the target and
reference tables, when a modify request is initiated from a worker
node that is not the "FirstWorkerNode" (#7542)
* Fixes a performance issue when distributing a table that depends on an
extension (#7574)
* Fixes a performance issue when using "\d tablename" on a server with
many tables (#7577)
* Fixes a crash caused by some form of ALTER TABLE ADD COLUMN
statements. When adding multiple columns, if one of the ADD COLUMN
statements contains a FOREIGN constraint omitting the referenced
columns in the statement, a SEGFAULT was occurring. (#7522)
* Fixes a performance issue when creating distributed tables if many
already exist (#7575, #7579)
* Fixes a bug when hostname in pg_dist_node resolves to multiple IPs
(#7377)
* Fixes performance issue when tracking foreign key constraints on
systems with many constraints (#7578)
* Fixes segmentation fault when using CASE WHEN in DO block within
functions. (#7554)
* Fixes undefined behavior in master_disable_node due to argument
mismatch (#7492)
* Fixes some potential bugs by correctly marking some variables as
volatile (#7570)
* Logs username in the failed connection message (#7432)
### citus v11.0.10 (February 15, 2024) ###
* Removes pg_send_cancellation and all references (#7135)
### citus v12.1.2 (February 12, 2024) ###
* Fixes the incorrect column count after ALTER TABLE (#7379)
### citus v12.0.1 (July 11, 2023) ###
* Fixes incorrect default value assumption for VACUUM(PROCESS_TOAST) (#7122)
* Fixes a bug that causes an unexpected error when adding a column
with a NULL constraint (#7093)
* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
* Fixes a bug with deleting colocation groups (#6929)
* Fixes memory and memory contexts leaks in Foreign Constraint Graphs (#7236)
* Fixes shard size bug with too many shards (#7018)
* Fixes the incorrect column count after ALTER TABLE (#7379)
* Improves citus_tables view performance (#7050)
* Makes sure to disallow creating a replicated distributed table
concurrently (#7219)
* Removes pg_send_cancellation and all references (#7135)
### citus v11.3.1 (February 12, 2024) ###
* Disallows MERGE when the query prunes down to zero shards (#6946)
* Fixes a bug related to non-existent objects in DDL commands (#6984)
* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
* Fixes a bug with deleting colocation groups (#6929)
* Fixes incorrect results on fetching scrollable with hold cursors (#7014)
* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
* Fixes replicate reference tables task fail when user is superuser (#6930)
* Fixes the incorrect column count after ALTER TABLE (#7379)
* Improves citus_shard_sizes performance (#7050)
* Makes sure to disallow creating a replicated distributed table
concurrently (#7219)
* Removes pg_send_cancellation and all references (#7135)
### citus v11.2.2 (February 12, 2024) ###
* Fixes a bug in background shard rebalancer where the replicate
reference tables task fails if the current user is not a superuser (#6930)
* Fixes a bug related to non-existent objects in DDL commands (#6984)
* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
* Fixes a bug with deleting colocation groups (#6929)
* Fixes incorrect results on fetching scrollable with hold cursors (#7014)
* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
* Fixes the incorrect column count after ALTER TABLE (#7379)
* Improves failure handling of distributed execution (#7090)
* Makes sure to disallow creating a replicated distributed table
concurrently (#7219)
* Removes pg_send_cancellation (#7135)
### citus v11.1.7 (February 12, 2024) ###
* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
* Fixes a bug related to non-existent objects in DDL commands (#6984)
* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
* Fixes a bug with deleting colocation groups (#6929)
* Fixes incorrect results on fetching scrollable with hold cursors (#7014)
* Fixes the incorrect column count after ALTER TABLE (#7379)
* Improves failure handling of distributed execution (#7090)
* Makes sure to disallow creating a replicated distributed table
concurrently (#7219)
* Removes pg_send_cancellation and all references (#7135)
### citus v11.0.9 (February 12, 2024) ###
* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
* Fixes a bug with deleting colocation groups (#6929)
* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
* Fixes the incorrect column count after ALTER TABLE (#7462)
* Improves failure handling of distributed execution (#7090)
### citus v12.1.1 (November 9, 2023) ###
* Fixes leaking of memory and memory contexts in Citus foreign key cache


@@ -35,6 +35,28 @@ To get citus installed from source we run `make install -s` in the first terminal
With the Citus cluster running you can connect to the coordinator in the first terminal via `psql -p9700`. Because the coordinator is the most common entrypoint, the `PGPORT` environment variable is set accordingly, so a plain `psql` connects directly to the coordinator.
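A small illustration of what that means in practice (assuming the image's `ENV PGPORT=9700` shown above):

```bash
# Both commands connect to the coordinator inside the devcontainer:
psql -p 9700   # explicit coordinator port
psql           # picks the port up from the PGPORT environment variable
```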
### Debugging in VS Code
1. Start Debugging: Press F5 in VS Code to start debugging. When prompted, you'll need to attach the debugger to the appropriate PostgreSQL process.
2. Identify the Process: If you're running a psql command, take note of the PID that appears in your psql prompt. For example:
```
[local] citus@citus:9700 (PID: 5436)=#
```
This PID (5436 in this case) indicates the process that you should attach the debugger to.
If you are uncertain about which process to attach to, you can list all running PostgreSQL processes using the following command:
```
ps aux | grep postgres
```
Look for the process associated with the PID you noted. For example:
```
citus 5436 0.0 0.0 0 0 ? S 14:00 0:00 postgres: citus citus
```
3. Attach the Debugger: Once you've identified the correct PID, select that process when prompted in VS Code to attach the debugger. You should now be able to debug the PostgreSQL session tied to the psql command.
4. Set Breakpoints and Debug: With the debugger attached, you can set breakpoints within the code. This allows you to step through the code execution, inspect variables, and fully debug the PostgreSQL instance running in your container.
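If you prefer staying in a terminal, you can attach gdb to the same backend directly; a minimal sketch, reusing the PID 5436 from the example prompt above:

```bash
# Attach gdb to the backend serving the psql session.
sudo gdb -p 5436
# Inside gdb: `bt` prints a backtrace, `break SomeCitusFunction` sets a
# breakpoint (hypothetical symbol name), and `c` resumes the backend.
```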
### Getting and building
[PostgreSQL documentation](https://www.postgresql.org/support/versioning/) has a

DEVCONTAINER.md (new file, 43 lines)

@@ -0,0 +1,43 @@
# Devcontainer
## Coredumps
When postgres/citus crashes it can write a coredump, which is useful for debugging the crash. Coredumps are enabled in the devcontainer by default, but not every environment is configured correctly out of the box. The most important configuration, which is not standardized, is the `core_pattern`. It can be verified from inside the container, but it cannot be changed from there, because the filesystem containing the setting is mounted read-only inside the container.
To verify whether corefiles will be written, run the following command in a terminal. It shows the filename pattern with which corefiles are written.
```bash
cat /proc/sys/kernel/core_pattern
```
This should be configured with a relative path or a simple filename, such as `core`. If your environment shows an absolute path you will need to change this setting. How to change it depends on the underlying system, because the setting lives in the kernel of the host running the container.
You can put any pattern in `/proc/sys/kernel/core_pattern` as you see fit. For example, you can add the PID to the core pattern in one of two ways (both shown in the sketch below):
- You either include `%p` in the core_pattern. This gets substituted with the PID of the crashing process.
- Alternatively you could set `/proc/sys/kernel/core_uses_pid` to `1` in the same way as you set `core_pattern`. This will append the PID to the corefile if `%p` is not explicitly contained in the core_pattern.
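A minimal sketch of both options, assuming a root shell on the host kernel (not inside the container):

```bash
# Option 1: substitute the PID of the crashing process into the pattern.
echo "core.%p" | sudo tee /proc/sys/kernel/core_pattern

# Option 2: keep a plain filename and let the kernel append the PID.
echo "core" | sudo tee /proc/sys/kernel/core_pattern
echo "1" | sudo tee /proc/sys/kernel/core_uses_pid
```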
When a coredump is written you can use the debug/launch configuration `Open core file`, which is preconfigured in the devcontainer. This opens a file prompt that lists all coredumps found in your workspace. To debug coredumps from `citus_dev` clusters that run in your `/data` directory, add the data directory to your workspace: in the Command Palette of VS Code run `>Workspace: Add Folder to Workspace...` and select the `/data` directory. The coredumps in `/data` will then show up in the `Open core file` debug configuration.
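Outside VS Code the same core file can be opened with plain gdb; a sketch, where the core file name is hypothetical:

```bash
# Load the postgres binary together with a matching core file.
gdb /home/citus/.pgenv/pgsql/bin/postgres core.5436
# Inside gdb: `bt full` shows the stack of the crashed process.
```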
### Windows (Docker Desktop)
When running in Docker Desktop on Windows you will most likely need to change this setting. The Linux guest in WSL2 that runs your container is the `docker-desktop` environment. The easiest way to get onto the host, where you can change this setting, is to open a PowerShell window and verify that the `docker-desktop` environment is listed.
```powershell
wsl --list
```
Among others this should list both `docker-desktop` and `docker-desktop-data`. You can then open a shell in the `docker-desktop` environment.
```powershell
wsl -d docker-desktop
```
Inside this shell you can verify that you have the right environment by running
```bash
cat /proc/sys/kernel/core_pattern
```
This should show the same configuration as the one you see inside the devcontainer. You can then change the setting by running the following command.
This changes the setting for the current session only; to make it permanent you will need to add the command to a startup script.
```bash
echo "core" > /proc/sys/kernel/core_pattern
```


@@ -20,6 +20,6 @@ tail -n +$RegisterCitusConfigVariables_begin_linenumber src/backend/distributed/
# extract citus gucs in the form of <tab><tab>"citus.X"
grep -P "^[\t][\t]\"citus\.[a-zA-Z_0-9]+\"" RegisterCitusConfigVariables_func_def.out > gucs.out
sort -c gucs.out
LC_COLLATE=C sort -c gucs.out
rm gucs.out
rm RegisterCitusConfigVariables_func_def.out
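The `LC_COLLATE=C` prefix added here makes the order check byte-wise instead of locale-dependent. An illustration of how the two orderings can disagree (a sketch; the file name is made up and the GUC names are only examples):

```bash
# Under LC_COLLATE=C, '_' (0x5F) sorts before letters, while many locales give
# punctuation a lower collation weight, so these two lines compare differently.
printf 'citus.logical_replication_timeout\ncitus.log_remote_commands\n' > example.out
sort -c example.out               # may pass under e.g. en_US.UTF-8
LC_COLLATE=C sort -c example.out  # reports disorder: "log_" sorts before "logi"
rm example.out
```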


@@ -2946,7 +2946,7 @@ MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion)
}
else
{
rightComparisionLimit = strlen(leftVersion);
rightComparisionLimit = strlen(rightVersion);
}
/* we can error out early if hyphens are not in the same position */
@ -3021,6 +3021,8 @@ AvailableExtensionVersionColumnar(void)
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension is not found")));
return NULL; /* keep compiler happy */
}

View File

@ -48,7 +48,7 @@ The purpose of this document is to provide comprehensive technical documentation
- [Rebalancing algorithm](#rebalancing-algorithm)
- [Shard moves](#shard-moves)
- [Shard splits](#shard-splits)
- [Background tasks](#background-tasks)
- [Background task runner](#background-task-runner)
- [Resource cleanup](#resource-cleanup)
- [Logical decoding / CDC](#logical-decoding--cdc)
- [CDC ordering](#cdc-ordering)
@ -2344,17 +2344,164 @@ In the past we had some bugs where we had a `palloc` failure while holding `Spin
# Rebalancing
A high-level overview of the rebalancer is given in [this rebalancer blog post](https://www.citusdata.com/blog/2021/03/13/scaling-out-postgres-with-citus-open-source-shard-rebalancer/).
A high-level overview of the shard rebalancer is given in [this rebalancer blog post][rebalancer-post]. It is a bit outdated though, specifically that it uses `rebalance_table_shards()` instead of the newer `citus_rebalance_start()`.
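As a quick illustration (a sketch, not from the blog post), both entry points are plain UDF calls; the newer one returns immediately and continues in the background:
```bash
# Newer: schedule a rebalance as a background job and return immediately.
psql -c "SELECT citus_rebalance_start();"

# Older, session-bound variant described in the blog post.
psql -c "SELECT rebalance_table_shards();"
```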
The shard rebalancer consists of 4 main parts:
1. The rebalancing algorithm: Decides what moves/splits it should do to make
the cluster balanced.
2. The background task runner: Runs a full rebalance according to a plan
created by the planner.
3. A shard group move/split: These are the smallest units of work that the
rebalancer does. If one fails midway through, the move is aborted and the
shard group remains unchanged.
4. Deferred cleanup: The source shards stay present for a while after a move to
let long-running read queries continue; eventually they need to be cleaned
up.
These parts interact, but they are pretty self-contained. Usually it's only
necessary to change one of them to add a feature/fix a bug.
[rebalancer-post]: https://www.citusdata.com/blog/2021/03/13/scaling-out-postgres-with-citus-open-source-shard-rebalancer/
## Rebalancing algorithm
The rebalancing algorithm tries to find an optimal placement of shard groups
across nodes. This is not an easy job, because this is an [NP-hard
problem](https://en.wikipedia.org/wiki/Knapsack_problem). So instead of going for
the fully optimal solution it uses a greedy approach to reach a local
optimum, which so far has proved effective in getting close to an optimal
solution.
Even though it won't result in the perfect balance, the greedy approach has two
important practical benefits over a perfect solution:
1. It's relatively easy to understand why the algorithm decided on a certain move.
2. Every move makes the balance better. So if the rebalance is cancelled midway
through, the cluster will always be in a better situation than before.
As described in [this rebalancer blog post][rebalancer-post], the algorithm
takes three inputs from the functions configured in the `pg_dist_rebalance_strategy`
table (see the example query after this list):
1. Is a shard group allowed on a certain node?
2. What is the "cost" of a shard group, relative to the other shard groups?
3. What is the "capacity" of a node, relative to the other nodes?
Cost and capacity are vague on purpose; this way users can choose their own
way to determine the cost of a shard group, but **in practice "cost" is usually
disk size** (because `by_disk_size` is the default rebalance strategy).
Capacity is almost always set to 1, because almost all Citus clusters are
homogeneous (they contain the same nodes, except for maybe the coordinator). The
main usage for "Is a shard group allowed on a certain node?" is to be able to pin a
specific shard group to a specific node.
There is one last definition that you should know to understand the algorithm
and that is "utilization". Utilization is the total cost of all shard groups
divided by capacity. In practice this means that utilization is almost always
the same as cost because as explained above capacity is almost always 1. So if
you see "utilization" in the algorithm, for all intents and purposes you can
read it as "cost".
The way the general algorithm works is fairly straightforward. It starts by
creating an in-memory representation of the cluster, and then it tries to
improve that in-memory representation by making theoretical moves. To be
clear: the algorithm doesn't actually do any shard group moves, it only applies
those moves to its in-memory representation. The way it determines what
theoretical moves to make is as follows (updating the utilization of in-memory
nodes after every move):
1. Find all shard groups that are on a node where they are not allowed (due to
"Is a shard group allowed on a certain node?")
2. Order those shard groups by cost.
3. Move them one-by-one to the nodes with the lowest utilization where they are
allowed.
4. If the cluster is balanced we are done.
5. Take the most utilized node (A) and take the least utilized node (B).
6. Try moving the shard group with the highest cost from A to B.
7. If the balance is "better" commit this move and continue from step 4. (See subsection below for
what is "better")
8. If the balance is worse/equal try again from step 6 with the shard group
with the next highest cost on node A. If this was the lowest cost shard on
node A, then try with the highest cost shard again but on the next least
utilized node after node B. If no moves helped with the balance, try with
the next most utilized node after node A. If we tried all moves for all
nodes like this, we are done (we cannot get a better balance).
Of course, the devil is in the details.
### When is the balance better?
The main way to determine if the balance is better is by comparing the
utilization of nodes A and B before and after the move, and seeing whether they
are net closer to the average utilization of the nodes in the cluster. The
easiest way to explain this is with a simple example:
We have two nodes A and B. A has a utilization of 100GB and B has a utilization
of 70GB. So we will move a shard from A to B. A move of 15GB is obviously best:
it results in perfect balance (A=85GB, B=85GB). A move of 10GB is still
great, both improved in balance (A=90GB, B=80GB). A move of 20GB is also good;
the result is the same as a move of 10GB, only with the nodes swapped (A=80GB,
B=90GB).
The 10GB vs 20GB move shows a limitation of the current algorithm. The
algorithm mostly makes choices based on the end state, not on the cost of
moving a shard. This is usually not a huge problem in practice though.
### Thresholds
The algorithm is full of thresholds; the main reason these exist is that
moving shards around isn't free. (A sketch of tuning one of them follows this
list.)
- `threshold`: Used to determine if the cluster is in a good enough state. For
the `by_disk_size` rebalance strategy this is 10%, so if all nodes are at
most 10% above or 10% below the average utilization then no moves are
necessary anymore (i.e. the nodes are balanced enough). The main reason for
this threshold is that these small differences in utilization are not
necessarily problematic and might very well resolve automatically over time. For example, consider a scenario in which
one shard gets mostly written during the weekend, while another one gets
mostly written during the week. Moving shards on Monday that you then have to move back on
Friday is not very helpful given the overhead of moving data around.
- `improvement_threshold`: This is used in cases where a shard group move from
node A to B swaps which node now has the highest utilization (so afterwards B
will have higher utilization than A). As described above this can still
result in better balance. This threshold is meant to work around a
particularly bad situation where we move a lot of data for very little
benefit. Imagine this situation: A=200GB and B=99GB, where moving a 100GB shard
from A to B would bring their utilization closer to the average (A=100GB,
B=199GB). But obviously that's a tiny gain for a move of 100GB, which
probably takes lots of resources and time. The `improvement_threshold` is set
to 50% for the `by_disk_size` rebalance strategy. This means that this move
is only chosen if the utilization improvement is larger than 50% of the
utilization that the shard group causes on its current node.
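A hedged sketch of overriding `threshold` for a single rebalance; the `0.2` value is just an example, and the named `threshold` parameter belongs to the session-bound `rebalance_table_shards()` UDF:
```bash
# Only rebalance when a node deviates more than 20% from the average
# utilization, instead of the strategy's default threshold.
psql -c "SELECT rebalance_table_shards(threshold := 0.2);"
```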
### How do multiple colocation groups impact the rebalancer algorithm?
The previous section glossed over colocation groups a bit. The main reason for
that is that the algorithm doesn't handle multiple colocation groups very well.
If there are multiple colocation groups, each colocation group gets balanced
completely separately. For the utilization calculations, only the costs of the
shard groups in the colocation group that is currently being rebalanced are used.
The reasoning for this is that if you have two colocation groups, you probably
want to spread the shard groups from both colocation groups across multiple
nodes. And not have shard groups from colocation group 1 only be on node A and
shard groups from colocation group 2 only be on node B.
There is an important caveat here though for colocation groups that have fewer
shard groups than the number of nodes in the cluster (in practice these are
usually colocation groups used by schema based sharding, i.e. with a single
shard group): The rebalancer algorithm balances the shard groups from these
colocation groups as if they are all part of a single colocation group.
The main reason for this is to make sure that schemas for schema based sharding
are spread evenly across the nodes.
## Shard moves
Shard moves move a shard group placement to a different node (group). Moves are orchestrated by the `citus_move_shard_placement` UDF, which is also the function that the rebalancer runs to move a shard.
Shard moves move a shard group placement to a different node (group). It would be more correct if these were called "shard **group** moves", but in many places we don't due to historical reasons. Moves are orchestrated by the `citus_move_shard_placement` UDF, which is also the function that the rebalancer runs to move a shard.
We implement blocking and non-blocking shard moves. Non-blocking shard moves use logical replication, which has an important limitation. If the (distributed) table does not have a replica identity (usually the primary key), then update/delete commands will error out once we create a publication. That means using a non-blocking move without a replica identity does incur some downtime. Since a blocking move is generally faster (in part because it forces out regular work), it may be less invasive. We therefore force the user to choose when trying to move a shard group that includes a table without a replica identity by supplying `shard_transfer_mode := 'force_logical'` or `shard_transfer_mode := 'block_writes'`.
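A hedged sketch of what that forced choice looks like; the shard id, hostnames, and ports below are placeholders:
```bash
# Move a shard group that contains a table without a replica identity;
# Citus requires picking the transfer mode explicitly in this case.
psql -c "SELECT citus_move_shard_placement(
           102008, 'worker-1', 5432, 'worker-2', 5432,
           shard_transfer_mode := 'force_logical');"
```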
The blocking-move is mostly a simplified variant of the non-blocking move (with locks taken upfront). A non-blocking move involves the following steps:
The blocking-move is mostly a simplified variant of the non-blocking move, where the write locks are taken upfront so that no catch-up using logical replication is needed. A non-blocking move involves the following steps:
- **Create the new shard group placement on the target node**. We also create constraints that do not involve an index and set up ownership and access control.
- **Create publication(s) on the source node**. We create publications containing the shards in the source shard group placement. We create one publication per table owner, mainly because we need one subscription per table owner to prevent privilege escalation issues on older versions of PostgreSQL (15 and below).
@ -2379,7 +2526,7 @@ A workaround for the replica identity problem is to always assign REPLICA IDENTI
## Shard splits
Shard splits convert one shard group ("split parent") into two or more shard groups ("split children") by splitting the hash range. The new shard groups can be placed on the node itself, or on other nodes. We implement blocking and non-blocking shard splits. The blocking variant is mostly a simplified version of non-blocking, so we only cover non-blocking here. Shard splits have many similarities to shard moves, and have the same `shard_transfer_mode` choice.
Shard splits convert one shard group ("split parent") into two or more shard groups ("split children") by splitting the hash range. Just like with shard moves it would be more correct to call these "shard **group** splits", but again we often don't. The new shard groups can be placed on the node itself, or on other nodes. We implement blocking and non-blocking shard splits. The blocking variant is mostly a simplified version of non-blocking, so we only cover non-blocking here. Shard splits have many similarities to shard moves, and have the same `shard_transfer_mode` choice.
The shard split is a lengthy process performed by the `NonBlockingShardSplit` function, supported by a custom output plugin to handle writes that happen during the split. There are a few different entry-points in this logic, namely: `citus_split_shard_by_split_points`, `create_distributed_table_concurrently`, and `isolate_tenant_to_node`.
@ -2409,19 +2556,54 @@ A difference between splits and moves is that the old shard ID disappears. In ca
## Background task runner
In the past the only way to trigger a rebalance was to call
`rebalance_table_shards()`. This function runs the rebalance using the current
session, which has the huge downside that the connection needs to be kept open
until the rebalance completes. So eventually we [introduced
`citus_rebalance_start()`](https://www.citusdata.com/blog/2022/09/19/citus-11-1-shards-postgres-tables-without-interruption/#rebalance-background),
which uses a background worker to do the rebalancing, so users can disconnect
their client and the rebalance continues. It even automatically retries moves
if they fail for some reason.
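A hedged sketch of that disconnect-friendly workflow; `citus_rebalance_status()` exists alongside `citus_rebalance_start()` in recent Citus versions, but check the version you run:
```bash
# Start the rebalance as a background job, then check on its progress
# later from a completely new connection.
psql -c "SELECT citus_rebalance_start();"
psql -c "SELECT * FROM citus_rebalance_status();"
```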
The way this works is using a general background job infrastructure that Citus
has in the tables `pg_dist_background_job` and `pg_dist_background_task`.
A job (often) contains multiple tasks. In case of the rebalancer, the job is
the full rebalance, and each of its tasks is a separate shard group move.
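A hedged sketch of peeking at these catalog tables; the column names below match recent versions but may differ slightly across releases:
```bash
# Show the most recent background jobs and their individual tasks.
psql -c "SELECT job_id, state, description
         FROM pg_dist_background_job ORDER BY job_id DESC LIMIT 5;"
psql -c "SELECT task_id, job_id, status, command
         FROM pg_dist_background_task ORDER BY task_id DESC LIMIT 5;"
```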
### Parallel background task execution
A big benefit of the background task infrastructure is that it can execute tasks
and jobs in parallel. This can make rebalancing go much faster, especially in
clusters with many nodes. To ensure that we're not doing too many tasks in
parallel though, we have a few ways to limit concurrency:
1. Tasks can depend on each other. This makes sure that one task doesn't start
before all the ones that it depends on have finished.
2. The maximum number of parallel tasks being executed at the same time can be
limited using `citus.max_background_task_executors`. The default for
this is 4.
3. Tasks can specify which nodes are involved in the task; that way we can
ensure that a single node is not involved in too many tasks. The
rebalancer specifies both the source and target node as being involved in
the task. That, together with the default of 1 for
`citus.max_background_task_executors_per_node`, makes sure that a node
doesn't handle more than a single shard move at once, while still allowing
moves involving different nodes to happen in parallel. For larger machines
it can be beneficial to increase the default a bit, as sketched below.
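A hedged sketch of raising the per-node limit; the value `2` is just an example:
```bash
# Allow each node to participate in two task executors at once,
# then reload the configuration so the setting takes effect.
psql -c "ALTER SYSTEM SET citus.max_background_task_executors_per_node = 2;"
psql -c "SELECT pg_reload_conf();"
```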
## Resource cleanup
During a shard move/split, some PostgreSQL objects can be created that live outside of the scope of any transaction or are committed early. We need to make sure those objects are dropped once the shard move ends, either through failure or success. For instance, subscriptions and publications used for logical replication need to be dropped in case of failure, but also the target shard (in case of failure) and source shard (in case of success).
To achieve that, we write records to pg_dist_cleanup before creating an object to remember that we need to clean it. We distinguish between a few scenarios:
**Cleanup-on-failure**: Cleanup should only happen if the operation fails. The main example is the target shard of a move/split. We achieve cleanup-on-failure by writing pg_dist_cleanup records in a subtransaction (transaction on a localhost connection that commits immediately) and deleting them in the outer transaction that performs the move/split. That way, they remain in pg_dist_cleanup in case of failure, but disappear in case of success.
**Cleanup-deferred-on-success**: Cleanup should only happen after the operation (move/split) succeeds. We use this to clean the source shards of a shard move. We previously dropped shards immediately as part of the transaction, but this frequently led to deadlocks at the end of a shard move. We achieve cleanup-on-success by writing pg_dist_cleanup records as part of the outer transaction that performs the move/split.
**Cleanup-always**: For most resources that require cleanup records, cleanup should happen regardless of whether the operation succeeds or fails. For instance, subscriptions and publications should always be dropped. We achieve cleanup-always by writing pg_dist_cleanup records in a subtransaction, and at the end of the operation we try to clean up the object immediately; if that succeeds, we delete the record. If cleanup fails, we do not fail the whole operation, but instead leave the pg_dist_cleanup record in place for the maintenance daemon.
Resource cleaner (currently shard_cleaner.c) is part of the maintenance daemon and periodically checks pg_dist_cleanup for cleanup tasks. It's important to prevent cleanup of operations that are already running. Therefore, each operation has a unique operation ID (from a sequence) and takes an advisory lock on the operation ID. The resource cleaner learns the operation ID from pg_dist_cleanup and attempts to acquire this lock. If it cannot acquire the lock, the operation is not done and cleanup is skipped. If it can, the operation is done, and the resource cleaner rechecks whether the record still exists, since it could have been deleted by the operation.
Resource cleaner (currently shard_cleaner.c) is part of the maintenance daemon and periodically checks pg_dist_cleanup for cleanup tasks. It's important to prevent cleanup of operations that are still running. Therefore, each operation has a unique operation ID (from a sequence) and takes an advisory lock on the operation ID. The resource cleaner learns the operation ID from pg_dist_cleanup and attempts to acquire this lock. If it cannot acquire the lock, the operation is not done and cleanup is skipped. If it can, the operation is done, and the resource cleaner rechecks whether the record still exists, since it could have been deleted by the operation.
Cleanup records always need to be committed before creating the actual object. It's also important for the cleanup operation to be idempotent, since the server might crash immediately after committing a cleanup record, but before actually creating the object. Hence, the object might not exist when trying to clean it up. In that case, the cleanup is seen as successful, and the cleanup record is removed.
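A hedged sketch of inspecting pending cleanup records; the column names below match recent versions but may vary slightly:
```bash
# List cleanup records that the maintenance daemon still needs to
# process, together with the operation they belong to.
psql -c "SELECT record_id, operation_id, object_type, object_name,
                node_group_id, policy_type
         FROM pg_dist_cleanup;"
```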
@ -2429,7 +2611,7 @@ Cleanup records always need to be committed before creating the actual object. I
PostgreSQL supports change data capture (CDC) via the logical decoding interface. The basic idea behind logical decoding is that you make a replication connection (a special type of postgres connection), start replication, and then the backend process reads through the WAL, decodes the WAL records, and emits them over the wire in a format defined by the output plugin. If we were to use regular logical decoding on the nodes of a Citus cluster, we would see the name of the shard in each write, and internal data transfers such as shard moves would result in inserts being emitted. We use several techniques to avoid this.
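For readers new to logical decoding, a minimal generic sketch using the built-in `test_decoding` plugin (plain PostgreSQL, not the Citus-specific plumbing discussed below):
```bash
# Create a logical replication slot, then stream decoded changes from
# the WAL to stdout as they happen.
pg_recvlogical -d postgres --slot=decoding_example --create-slot --plugin=test_decoding
pg_recvlogical -d postgres --slot=decoding_example --start -f -
```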
All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal_start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to the output plugin to decide what to do with changes marked as DoNotReplicateId.
All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal.start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to the output plugin to decide what to do with changes marked as DoNotReplicateId.
We have very minimal control over replication commands like `CREATE_REPLICATION_SLOT`, since there are no direct hooks, and decoder names (e.g. “pgoutput”) are typically hard-coded in the client. The only method we found of overriding logical decoding behaviour is to overload the output plugin name in the dynamic library path.

View File

@ -397,7 +397,7 @@ AdjustClocksToTransactionHighest(List *nodeConnectionList,
/* Set the clock value on participating worker nodes */
appendStringInfo(queryToSend,
"SELECT pg_catalog.citus_internal_adjust_local_clock_to_remote"
"SELECT citus_internal.adjust_local_clock_to_remote"
"('(%lu, %u)'::pg_catalog.cluster_clock);",
transactionClockValue->logical, transactionClockValue->counter);

View File

@ -22,6 +22,7 @@
#include "catalog/dependency.h"
#include "catalog/index.h"
#include "catalog/pg_am.h"
#include "catalog/pg_attrdef.h"
#include "catalog/pg_attribute.h"
#include "catalog/pg_enum.h"
#include "catalog/pg_extension.h"
@ -50,6 +51,7 @@
#include "tcop/pquery.h"
#include "tcop/tcopprot.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
@ -1696,52 +1698,39 @@ PropagatePrerequisiteObjectsForDistributedTable(Oid relationId)
void
EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId)
{
List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE);
citusTableIdList = list_append_unique_oid(citusTableIdList, ownerRelationId);
Oid attrDefOid;
List *attrDefOids = GetAttrDefsFromSequence(seqOid);
Oid citusTableId = InvalidOid;
foreach_oid(citusTableId, citusTableIdList)
foreach_oid(attrDefOid, attrDefOids)
{
List *seqInfoList = NIL;
GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0, DEPENDENCY_AUTO);
ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid);
SequenceInfo *seqInfo = NULL;
foreach_ptr(seqInfo, seqInfoList)
/*
* If another distributed table is using the same sequence
* in one of its column defaults, make sure the types of the
* columns match.
*
* We skip non-distributed tables, but we need to check the current
* table as it might reference the same sequence multiple times.
*/
if (columnAddress.objectId != ownerRelationId &&
!IsCitusTable(columnAddress.objectId))
{
AttrNumber currentAttnum = seqInfo->attributeNumber;
Oid currentSeqOid = seqInfo->sequenceOid;
if (!seqInfo->isNextValDefault)
{
/*
* If a sequence is not on the nextval, we don't need any check.
* This is a dependent sequence via ALTER SEQUENCE .. OWNED BY col
*/
continue;
}
/*
* If another distributed table is using the same sequence
* in one of its column defaults, make sure the types of the
* columns match
*/
if (currentSeqOid == seqOid)
{
Oid currentAttributeTypId = GetAttributeTypeOid(citusTableId,
currentAttnum);
if (attributeTypeId != currentAttributeTypId)
{
char *sequenceName = generate_qualified_relation_name(
seqOid);
char *citusTableName =
generate_qualified_relation_name(citusTableId);
ereport(ERROR, (errmsg(
"The sequence %s is already used for a different"
" type in column %d of the table %s",
sequenceName, currentAttnum,
citusTableName)));
}
}
continue;
}
Oid currentAttributeTypId = GetAttributeTypeOid(columnAddress.objectId,
columnAddress.objectSubId);
if (attributeTypeId != currentAttributeTypId)
{
char *sequenceName = generate_qualified_relation_name(
seqOid);
char *citusTableName =
generate_qualified_relation_name(columnAddress.objectId);
ereport(ERROR, (errmsg(
"The sequence %s is already used for a different"
" type in column %d of the table %s",
sequenceName, columnAddress.objectSubId,
citusTableName)));
}
}
}

View File

@ -40,15 +40,38 @@
#include "distributed/deparse_shard_query.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/local_executor.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"
#include "distributed/metadata_utility.h"
#include "distributed/multi_executor.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/serialize_distributed_ddls.h"
#include "distributed/shard_cleaner.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_transaction.h"
/*
* Used to save original name of the database before it is replaced with a
* temporary name for failure handling purposes in PreprocessCreateDatabaseStmt().
*/
static char *CreateDatabaseCommandOriginalDbName = NULL;
/*
* The format string used when creating a temporary database for failure
* handling purposes.
*
* The fields are as follows to ensure using a unique name for each temporary
* database:
* - operationId: The operation id returned by RegisterOperationNeedingCleanup().
* - groupId: The group id of the worker node where CREATE DATABASE command
* is issued from.
*/
#define TEMP_DATABASE_NAME_FMT "citus_temp_database_%lu_%d"
/*
* DatabaseCollationInfo is used to store collation related information of a database.
*/
@ -286,8 +309,9 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
* NontransactionalNodeDDLTask to run the command on the workers outside
* the transaction block.
*/
return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands);
bool warnForPartialFailure = true;
return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands,
warnForPartialFailure);
}
else
{
@ -453,7 +477,12 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
*
* In this stage, we perform validations that we want to ensure before delegating to
* previous utility hooks because it might not be convenient to throw an error in an
* implicit transaction that creates a database.
* implicit transaction that creates a database. Also in this stage, we save the original
* database name and replace dbname field with a temporary name for failure handling
* purposes. We let Postgres create the database with the temporary name, insert a cleanup
record for the temporary database name on all nodes and let PostprocessCreateDatabaseStmt()
return the distributed DDL job that both creates the database with the temporary name
* and then renames it back to its original name.
*
* We also serialize database commands globally by acquiring a Citus specific advisory
* lock based on OCLASS_DATABASE on the first primary worker node.
@ -467,24 +496,56 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
return NIL;
}
EnsurePropagationToCoordinator();
EnsureCoordinatorIsInMetadata();
CreatedbStmt *stmt = castNode(CreatedbStmt, node);
EnsureSupportedCreateDatabaseCommand(stmt);
SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE);
OperationId operationId = RegisterOperationNeedingCleanup();
char *tempDatabaseName = psprintf(TEMP_DATABASE_NAME_FMT,
operationId, GetLocalGroupId());
List *remoteNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock);
WorkerNode *remoteNode = NULL;
foreach_ptr(remoteNode, remoteNodes)
{
InsertCleanupRecordOutsideTransaction(
CLEANUP_OBJECT_DATABASE,
pstrdup(quote_identifier(tempDatabaseName)),
remoteNode->groupId,
CLEANUP_ON_FAILURE
);
}
CreateDatabaseCommandOriginalDbName = stmt->dbname;
stmt->dbname = tempDatabaseName;
/*
* Delete cleanup records in the same transaction so that if the current
* transaction fails for some reason, then the cleanup records won't be
* deleted. In the happy path, we will delete the cleanup records without
* deferring them to the background worker.
*/
FinalizeOperationNeedingCleanupOnSuccess("create database");
return NIL;
}
/*
* PostprocessCreateDatabaseStmt is executed after the statement is applied to the local
* postgres instance. In this stage we prepare the commands that need to be run on
* all workers to create the database. Since the CREATE DATABASE statement gives error
* in a transaction block, we need to use NontransactionalNodeDDLTaskList to send the
* CREATE DATABASE statement to the workers.
* postgres instance.
*
* In this stage, we first rename the temporary database back to its original name for
* local node and then return a list of distributed DDL jobs to create the database with
* the temporary name and then to rename it back to its original name. That way, if CREATE
* DATABASE fails on any of the nodes, the temporary database will be cleaned up by the
* cleanup records that we inserted in PreprocessCreateDatabaseStmt() and in case of a
failure, we won't leak any database with the name that the user intended to use for
* the database.
*/
List *
PostprocessCreateDatabaseStmt(Node *node, const char *queryString)
@ -508,20 +569,71 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString)
char *createDatabaseCommand = DeparseTreeNode(node);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) createDatabaseCommand,
ENABLE_DDL_PROPAGATION);
List *createDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) createDatabaseCommand,
ENABLE_DDL_PROPAGATION);
return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands);
/*
* Since the CREATE DATABASE statements cannot be executed in a transaction
* block, we need to use NontransactionalNodeDDLTaskList() to send the CREATE
* DATABASE statement to the workers.
*/
bool warnForPartialFailure = false;
List *createDatabaseDDLJobList =
NontransactionalNodeDDLTaskList(REMOTE_NODES, createDatabaseCommands,
warnForPartialFailure);
CreatedbStmt *stmt = castNode(CreatedbStmt, node);
char *renameDatabaseCommand =
psprintf("ALTER DATABASE %s RENAME TO %s",
quote_identifier(stmt->dbname),
quote_identifier(CreateDatabaseCommandOriginalDbName));
List *renameDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION,
renameDatabaseCommand,
ENABLE_DDL_PROPAGATION);
/*
* We use NodeDDLTaskList() to send the RENAME DATABASE statement to the
* workers because we want to execute it in a coordinated transaction.
*/
List *renameDatabaseDDLJobList =
NodeDDLTaskList(REMOTE_NODES, renameDatabaseCommands);
/*
* Temporarily disable citus.enable_ddl_propagation before issuing
* rename command locally because we don't want to execute it on remote
* nodes yet. We will execute it on remote nodes by returning it as a
* distributed DDL job.
*
* The reason why we don't want to execute it on remote nodes yet is that
* the database is not created on remote nodes yet.
*/
int saveNestLevel = NewGUCNestLevel();
set_config_option("citus.enable_ddl_propagation", "off",
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
ExecuteUtilityCommand(renameDatabaseCommand);
AtEOXact_GUC(true, saveNestLevel);
/*
* Restore the original database name because MarkObjectDistributed()
* resolves oid of the object based on the database name and is called
* after executing the distributed DDL job that renames temporary database.
*/
stmt->dbname = CreateDatabaseCommandOriginalDbName;
return list_concat(createDatabaseDDLJobList, renameDatabaseDDLJobList);
}
/*
* PreprocessDropDatabaseStmt is executed before the statement is applied to the local
* postgres instance. In this stage we can prepare the commands that need to be run on
* all workers to drop the database. Since the DROP DATABASE statement gives error in
* transaction context, we need to use NontransactionalNodeDDLTaskList to send the
* DROP DATABASE statement to the workers.
* all workers to drop the database.
*
* We also serialize database commands globally by acquiring a Citus specific advisory
* lock based on OCLASS_DATABASE on the first primary worker node.
@ -559,11 +671,20 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString,
char *dropDatabaseCommand = DeparseTreeNode(node);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) dropDatabaseCommand,
ENABLE_DDL_PROPAGATION);
List *dropDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) dropDatabaseCommand,
ENABLE_DDL_PROPAGATION);
return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands);
/*
* Due to same reason stated in PostprocessCreateDatabaseStmt(), we need to
* use NontransactionalNodeDDLTaskList() to send the DROP DATABASE statement
* to the workers.
*/
bool warnForPartialFailure = true;
List *dropDatabaseDDLJobList =
NontransactionalNodeDDLTaskList(REMOTE_NODES, dropDatabaseCommands,
warnForPartialFailure);
return dropDatabaseDDLJobList;
}
@ -890,7 +1011,7 @@ CreateDatabaseDDLCommand(Oid dbId)
/* Generate the CREATE DATABASE statement */
appendStringInfo(outerDbStmt,
"SELECT pg_catalog.citus_internal_database_command(%s)",
"SELECT citus_internal.database_command(%s)",
quote_literal_cstr(createStmt));
ReleaseSysCache(tuple);

View File

@ -1093,33 +1093,26 @@ List *
GetDependentFDWsToExtension(Oid extensionId)
{
List *extensionFDWs = NIL;
ScanKeyData key[3];
int scanKeyCount = 3;
ScanKeyData key[1];
HeapTuple tup;
Relation pgDepend = table_open(DependRelationId, AccessShareLock);
ScanKeyInit(&key[0],
Anum_pg_depend_refclassid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(ExtensionRelationId));
ScanKeyInit(&key[1],
Anum_pg_depend_refobjid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(extensionId));
ScanKeyInit(&key[2],
Anum_pg_depend_classid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(ForeignDataWrapperRelationId));
SysScanDesc scan = systable_beginscan(pgDepend, InvalidOid, false,
NULL, scanKeyCount, key);
SysScanDesc scan = systable_beginscan(pgDepend, DependDependerIndexId, true,
NULL, lengthof(key), key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_depend pgDependEntry = (Form_pg_depend) GETSTRUCT(tup);
if (pgDependEntry->deptype == DEPENDENCY_EXTENSION)
if (pgDependEntry->deptype == DEPENDENCY_EXTENSION &&
pgDependEntry->refclassid == ExtensionRelationId &&
pgDependEntry->refobjid == extensionId)
{
extensionFDWs = lappend_oid(extensionFDWs, pgDependEntry->objid);
}

View File

@ -20,6 +20,7 @@
#include "access/xact.h"
#include "catalog/namespace.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_depend.h"
#include "catalog/pg_type.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
@ -36,6 +37,7 @@
#include "distributed/commands.h"
#include "distributed/commands/sequence.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/hash_helpers.h"
#include "distributed/listutils.h"
#include "distributed/multi_join_order.h"
#include "distributed/namespace_utils.h"
@ -1198,6 +1200,114 @@ TableHasExternalForeignKeys(Oid relationId)
}
/*
* ForeignConstraintMatchesFlags is a function with logic that's very specific
* to GetForeignKeyOids. There's no reason to use it in any other context.
*/
static bool
ForeignConstraintMatchesFlags(Form_pg_constraint constraintForm,
int flags)
{
if (constraintForm->contype != CONSTRAINT_FOREIGN)
{
return false;
}
bool inheritedConstraint = OidIsValid(constraintForm->conparentid);
if (inheritedConstraint)
{
/*
* We only consider the constraints that are explicitly created on
* the table as we already process the constraints from parent tables
* implicitly when a command is issued
*/
return false;
}
bool excludeSelfReference = (flags & EXCLUDE_SELF_REFERENCES);
bool isSelfReference = (constraintForm->conrelid == constraintForm->confrelid);
if (excludeSelfReference && isSelfReference)
{
return false;
}
Oid otherTableId = InvalidOid;
if (flags & INCLUDE_REFERENCING_CONSTRAINTS)
{
otherTableId = constraintForm->confrelid;
}
else
{
otherTableId = constraintForm->conrelid;
}
return IsTableTypeIncluded(otherTableId, flags);
}
/*
* GetForeignKeyOidsForReferencedTable returns a list of foreign key OIDs that
* reference the relationId and match the given flags.
*
* This is separated from GetForeignKeyOids because we need to scan pg_depend
* instead of pg_constraint directly. The reason for this is that there is no
* index on the confrelid of pg_constraint, so searching by that column
* requires a seqscan.
*/
static List *
GetForeignKeyOidsForReferencedTable(Oid relationId, int flags)
{
HTAB *foreignKeyOidsSet = CreateSimpleHashSetWithName(
Oid, "ReferencingForeignKeyOidsSet");
List *foreignKeyOidsList = NIL;
ScanKeyData key[2];
HeapTuple dependTup;
Relation depRel = table_open(DependRelationId, AccessShareLock);
ScanKeyInit(&key[0],
Anum_pg_depend_refclassid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(RelationRelationId));
ScanKeyInit(&key[1],
Anum_pg_depend_refobjid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(relationId));
SysScanDesc scan = systable_beginscan(depRel, DependReferenceIndexId, true,
NULL, lengthof(key), key);
while (HeapTupleIsValid(dependTup = systable_getnext(scan)))
{
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(dependTup);
if (deprec->classid != ConstraintRelationId ||
deprec->deptype != DEPENDENCY_NORMAL ||
hash_search(foreignKeyOidsSet, &deprec->objid, HASH_FIND, NULL))
{
continue;
}
HeapTuple constraintTup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(
deprec->objid));
if (!HeapTupleIsValid(constraintTup)) /* can happen during DROP TABLE */
{
continue;
}
Form_pg_constraint constraint = (Form_pg_constraint) GETSTRUCT(constraintTup);
if (constraint->confrelid == relationId &&
ForeignConstraintMatchesFlags(constraint, flags))
{
foreignKeyOidsList = lappend_oid(foreignKeyOidsList, constraint->oid);
hash_search(foreignKeyOidsSet, &constraint->oid, HASH_ENTER, NULL);
}
ReleaseSysCache(constraintTup);
}
systable_endscan(scan);
table_close(depRel, AccessShareLock);
return foreignKeyOidsList;
}
/*
* GetForeignKeyOids takes in a relationId, and returns a list of OIDs for
* foreign constraints that the relation with relationId is involved according
@ -1207,9 +1317,8 @@ TableHasExternalForeignKeys(Oid relationId)
List *
GetForeignKeyOids(Oid relationId, int flags)
{
AttrNumber pgConstraintTargetAttrNumber = InvalidAttrNumber;
bool extractReferencing = (flags & INCLUDE_REFERENCING_CONSTRAINTS);
bool extractReferencing PG_USED_FOR_ASSERTS_ONLY = (flags &
INCLUDE_REFERENCING_CONSTRAINTS);
bool extractReferenced = (flags & INCLUDE_REFERENCED_CONSTRAINTS);
/*
@ -1220,22 +1329,10 @@ GetForeignKeyOids(Oid relationId, int flags)
Assert(!(extractReferencing && extractReferenced));
Assert(extractReferencing || extractReferenced);
bool useIndex = false;
Oid indexOid = InvalidOid;
if (extractReferencing)
if (extractReferenced)
{
pgConstraintTargetAttrNumber = Anum_pg_constraint_conrelid;
useIndex = true;
indexOid = ConstraintRelidTypidNameIndexId;
return GetForeignKeyOidsForReferencedTable(relationId, flags);
}
else if (extractReferenced)
{
pgConstraintTargetAttrNumber = Anum_pg_constraint_confrelid;
}
bool excludeSelfReference = (flags & EXCLUDE_SELF_REFERENCES);
List *foreignKeyOids = NIL;
@ -1243,62 +1340,22 @@ GetForeignKeyOids(Oid relationId, int flags)
int scanKeyCount = 1;
Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], pgConstraintTargetAttrNumber,
ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid,
BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId));
SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, indexOid, useIndex,
SysScanDesc scanDescriptor = systable_beginscan(pgConstraint,
ConstraintRelidTypidNameIndexId, true,
NULL, scanKeyCount, scanKey);
HeapTuple heapTuple = systable_getnext(scanDescriptor);
while (HeapTupleIsValid(heapTuple))
HeapTuple heapTuple;
while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor)))
{
Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
if (constraintForm->contype != CONSTRAINT_FOREIGN)
if (ForeignConstraintMatchesFlags(constraintForm, flags))
{
heapTuple = systable_getnext(scanDescriptor);
continue;
foreignKeyOids = lappend_oid(foreignKeyOids, constraintForm->oid);
}
bool inheritedConstraint = OidIsValid(constraintForm->conparentid);
if (inheritedConstraint)
{
/*
* We only consider the constraints that are explicitly created on
* the table as we already process the constraints from parent tables
* implicitly when a command is issued
*/
heapTuple = systable_getnext(scanDescriptor);
continue;
}
Oid constraintId = constraintForm->oid;
bool isSelfReference = (constraintForm->conrelid == constraintForm->confrelid);
if (excludeSelfReference && isSelfReference)
{
heapTuple = systable_getnext(scanDescriptor);
continue;
}
Oid otherTableId = InvalidOid;
if (extractReferencing)
{
otherTableId = constraintForm->confrelid;
}
else if (extractReferenced)
{
otherTableId = constraintForm->conrelid;
}
if (!IsTableTypeIncluded(otherTableId, flags))
{
heapTuple = systable_getnext(scanDescriptor);
continue;
}
foreignKeyOids = lappend_oid(foreignKeyOids, constraintId);
heapTuple = systable_getnext(scanDescriptor);
}
systable_endscan(scanDescriptor);

View File

@ -493,6 +493,7 @@ GenerateCreateIndexDDLJob(IndexStmt *createIndexStatement, const char *createInd
ddlJob->startNewTransaction = createIndexStatement->concurrent;
ddlJob->metadataSyncCommand = createIndexCommand;
ddlJob->taskList = CreateIndexTaskList(createIndexStatement);
ddlJob->warnForPartialFailure = true;
return ddlJob;
}
@ -652,6 +653,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand,
"concurrently");
ddlJob->metadataSyncCommand = reindexCommand;
ddlJob->taskList = CreateReindexTaskList(relationId, reindexStatement);
ddlJob->warnForPartialFailure = true;
ddlJobs = list_make1(ddlJob);
}
@ -780,6 +782,7 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand,
ddlJob->metadataSyncCommand = dropIndexCommand;
ddlJob->taskList = DropIndexTaskList(distributedRelationId, distributedIndexId,
dropIndexStatement);
ddlJob->warnForPartialFailure = true;
ddlJobs = list_make1(ddlJob);
}

View File

@ -2568,7 +2568,7 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu
* Find the shard interval and id for the partition column value for
* non-reference tables.
*
* For reference table, this function blindly returns the tables single
* For reference tables and single shard distributed tables, this function blindly returns the table's single
* shard.
*/
ShardInterval *shardInterval = FindShardInterval(partitionColumnValue, cacheEntry);
@ -2663,7 +2663,6 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest,
CreateIntermediateResultsDirectory();
const int fileFlags = (O_CREAT | O_RDWR | O_TRUNC);
const int fileMode = (S_IRUSR | S_IWUSR);
StringInfo filePath = makeStringInfo();
appendStringInfo(filePath, "%s_%ld", copyDest->colocatedIntermediateResultIdPrefix,
@ -2671,7 +2670,7 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest,
const char *fileName = QueryResultFileName(filePath->data);
shardState->fileDest =
FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags, fileMode));
FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags));
CopyOutState localFileCopyOutState = shardState->copyOutState;
bool isBinaryCopy = localFileCopyOutState->binary;

View File

@ -0,0 +1,351 @@
/*-------------------------------------------------------------------------
*
* non_main_db_distribute_object_ops.c
*
* Routines to support node-wide object management commands from non-main
* databases.
*
* RunPreprocessNonMainDBCommand and RunPostprocessNonMainDBCommand are
* the entrypoints for this module. These functions are called from
* utility_hook.c to support some of the node-wide object management
* commands from non-main databases.
*
* To add support for a new command type, one needs to define a new
* NonMainDbDistributeObjectOps object within OperationArray. Also, if
* the command requires marking or unmarking some objects as distributed,
* the necessary operations can be implemented in
* RunPreprocessNonMainDBCommand and RunPostprocessNonMainDBCommand.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/xact.h"
#include "catalog/pg_authid_d.h"
#include "nodes/nodes.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"
#include "distributed/commands.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/metadata_cache.h"
#include "distributed/remote_transaction.h"
#define EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER \
"SELECT citus_internal.execute_command_on_remote_nodes_as_user(%s, %s)"
#define START_MANAGEMENT_TRANSACTION \
"SELECT citus_internal.start_management_transaction('%lu')"
#define MARK_OBJECT_DISTRIBUTED \
"SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)"
#define UNMARK_OBJECT_DISTRIBUTED \
"SELECT pg_catalog.citus_unmark_object_distributed(%d, %d, %d, %s)"
/*
* NonMainDbDistributeObjectOps contains the necessary callbacks / flags to
* support node-wide object management commands from non-main databases.
*
* cannotBeExecutedInTransaction:
* Indicates whether the statement cannot be executed in a transaction. If
* this is set to true, the statement will be executed directly on the main
* database because there are no transactional visibility issues for such
* commands.
*
* checkSupportedObjectType:
* Callback function that checks whether type of the object referred to by
* given statement is supported. Can be NULL if not applicable for the
* statement type.
*/
typedef struct NonMainDbDistributeObjectOps
{
bool cannotBeExecutedInTransaction;
bool (*checkSupportedObjectType)(Node *parsetree);
} NonMainDbDistributeObjectOps;
/*
* checkSupportedObjectType callbacks for OperationArray.
*/
static bool CreateDbStmtCheckSupportedObjectType(Node *node);
static bool DropDbStmtCheckSupportedObjectType(Node *node);
static bool GrantStmtCheckSupportedObjectType(Node *node);
static bool SecLabelStmtCheckSupportedObjectType(Node *node);
/*
* OperationArray that holds NonMainDbDistributeObjectOps for different command types.
*/
static const NonMainDbDistributeObjectOps *const OperationArray[] = {
[T_CreateRoleStmt] = &(NonMainDbDistributeObjectOps) {
.cannotBeExecutedInTransaction = false,
.checkSupportedObjectType = NULL
},
[T_DropRoleStmt] = &(NonMainDbDistributeObjectOps) {
.cannotBeExecutedInTransaction = false,
.checkSupportedObjectType = NULL
},
[T_AlterRoleStmt] = &(NonMainDbDistributeObjectOps) {
.cannotBeExecutedInTransaction = false,
.checkSupportedObjectType = NULL
},
[T_GrantRoleStmt] = &(NonMainDbDistributeObjectOps) {
.cannotBeExecutedInTransaction = false,
.checkSupportedObjectType = NULL
},
[T_CreatedbStmt] = &(NonMainDbDistributeObjectOps) {
.cannotBeExecutedInTransaction = true,
.checkSupportedObjectType = CreateDbStmtCheckSupportedObjectType
},
[T_DropdbStmt] = &(NonMainDbDistributeObjectOps) {
.cannotBeExecutedInTransaction = true,
.checkSupportedObjectType = DropDbStmtCheckSupportedObjectType
},
[T_GrantStmt] = &(NonMainDbDistributeObjectOps) {
.cannotBeExecutedInTransaction = false,
.checkSupportedObjectType = GrantStmtCheckSupportedObjectType
},
[T_SecLabelStmt] = &(NonMainDbDistributeObjectOps) {
.cannotBeExecutedInTransaction = false,
.checkSupportedObjectType = SecLabelStmtCheckSupportedObjectType
},
};
/* other static function declarations */
const NonMainDbDistributeObjectOps * GetNonMainDbDistributeObjectOps(Node *parsetree);
static void CreateRoleStmtMarkDistGloballyOnMainDbs(CreateRoleStmt *createRoleStmt);
static void DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt);
static void MarkObjectDistributedGloballyOnMainDbs(Oid catalogRelId, Oid objectId,
char *objectName);
static void UnmarkObjectDistributedOnLocalMainDb(uint16 catalogRelId, Oid objectId);
/*
* RunPreprocessNonMainDBCommand runs the necessary commands for a query, in main
* database before query is run on the local node with PrevProcessUtility.
*
* Returns true if previous utility hook needs to be skipped after completing
* preprocess phase.
*/
bool
RunPreprocessNonMainDBCommand(Node *parsetree)
{
if (IsMainDB)
{
return false;
}
const NonMainDbDistributeObjectOps *ops = GetNonMainDbDistributeObjectOps(parsetree);
if (!ops)
{
return false;
}
char *queryString = DeparseTreeNode(parsetree);
/*
* For the commands that cannot be executed in a transaction, there are no
* transactional visibility issues. We directly route them to main database
* so that we only have to consider one code-path for such commands.
*/
if (ops->cannotBeExecutedInTransaction)
{
IsMainDBCommandInXact = false;
RunCitusMainDBQuery((char *) queryString);
return true;
}
IsMainDBCommandInXact = true;
StringInfo mainDBQuery = makeStringInfo();
appendStringInfo(mainDBQuery,
START_MANAGEMENT_TRANSACTION,
GetCurrentFullTransactionId().value);
RunCitusMainDBQuery(mainDBQuery->data);
mainDBQuery = makeStringInfo();
appendStringInfo(mainDBQuery,
EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER,
quote_literal_cstr(queryString),
quote_literal_cstr(CurrentUserName()));
RunCitusMainDBQuery(mainDBQuery->data);
if (IsA(parsetree, DropRoleStmt))
{
DropRoleStmtUnmarkDistOnLocalMainDb((DropRoleStmt *) parsetree);
}
return false;
}
/*
* RunPostprocessNonMainDBCommand runs the necessary commands for a query, in main
* database after query is run on the local node with PrevProcessUtility.
*/
void
RunPostprocessNonMainDBCommand(Node *parsetree)
{
if (IsMainDB || !GetNonMainDbDistributeObjectOps(parsetree))
{
return;
}
if (IsA(parsetree, CreateRoleStmt))
{
CreateRoleStmtMarkDistGloballyOnMainDbs((CreateRoleStmt *) parsetree);
}
}
/*
* GetNonMainDbDistributeObjectOps returns the NonMainDbDistributeObjectOps for given
* command if it's node-wide object management command that's supported from non-main
* databases.
*/
const NonMainDbDistributeObjectOps *
GetNonMainDbDistributeObjectOps(Node *parsetree)
{
NodeTag tag = nodeTag(parsetree);
if (tag >= lengthof(OperationArray))
{
return NULL;
}
const NonMainDbDistributeObjectOps *ops = OperationArray[tag];
if (ops == NULL)
{
return NULL;
}
if (!ops->checkSupportedObjectType ||
ops->checkSupportedObjectType(parsetree))
{
return ops;
}
return NULL;
}
/*
* CreateRoleStmtMarkDistGloballyOnMainDbs marks the role as
* distributed on all main databases globally.
*/
static void
CreateRoleStmtMarkDistGloballyOnMainDbs(CreateRoleStmt *createRoleStmt)
{
/* object must exist as we've just created it */
bool missingOk = false;
Oid roleId = get_role_oid(createRoleStmt->role, missingOk);
MarkObjectDistributedGloballyOnMainDbs(AuthIdRelationId, roleId,
createRoleStmt->role);
}
/*
* DropRoleStmtUnmarkDistOnLocalMainDb unmarks the roles as
* distributed on the local main database.
*/
static void
DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt)
{
RoleSpec *roleSpec = NULL;
foreach_ptr(roleSpec, dropRoleStmt->roles)
{
Oid roleOid = get_role_oid(roleSpec->rolename,
dropRoleStmt->missing_ok);
if (roleOid == InvalidOid)
{
continue;
}
UnmarkObjectDistributedOnLocalMainDb(AuthIdRelationId, roleOid);
}
}
/*
* MarkObjectDistributedGloballyOnMainDbs marks an object as
* distributed on all main databases globally.
*/
static void
MarkObjectDistributedGloballyOnMainDbs(Oid catalogRelId, Oid objectId, char *objectName)
{
StringInfo mainDBQuery = makeStringInfo();
appendStringInfo(mainDBQuery,
MARK_OBJECT_DISTRIBUTED,
catalogRelId,
quote_literal_cstr(objectName),
objectId,
quote_literal_cstr(CurrentUserName()));
RunCitusMainDBQuery(mainDBQuery->data);
}
/*
* UnmarkObjectDistributedOnLocalMainDb unmarks an object as
* distributed on the local main database.
*/
static void
UnmarkObjectDistributedOnLocalMainDb(uint16 catalogRelId, Oid objectId)
{
const int subObjectId = 0;
const char *checkObjectExistence = "false";
StringInfo query = makeStringInfo();
appendStringInfo(query,
UNMARK_OBJECT_DISTRIBUTED,
catalogRelId, objectId,
subObjectId, checkObjectExistence);
RunCitusMainDBQuery(query->data);
}
/*
* checkSupportedObjectTypes callbacks for OperationArray lie below.
*/
static bool
CreateDbStmtCheckSupportedObjectType(Node *node)
{
/*
* We don't try to send the query to the main database if the CREATE
* DATABASE command is for the main database itself, this is a very
* rare case but it's exercised by our test suite.
*/
CreatedbStmt *stmt = castNode(CreatedbStmt, node);
return strcmp(stmt->dbname, MainDb) != 0;
}
static bool
DropDbStmtCheckSupportedObjectType(Node *node)
{
/*
* We don't try to send the query to the main database if the DROP
* DATABASE command is for the main database itself, this is a very
* rare case but it's exercised by our test suite.
*/
DropdbStmt *stmt = castNode(DropdbStmt, node);
return strcmp(stmt->dbname, MainDb) != 0;
}
static bool
GrantStmtCheckSupportedObjectType(Node *node)
{
GrantStmt *stmt = castNode(GrantStmt, node);
return stmt->objtype == OBJECT_DATABASE;
}
static bool
SecLabelStmtCheckSupportedObjectType(Node *node)
{
SecLabelStmt *stmt = castNode(SecLabelStmt, node);
return stmt->objtype == OBJECT_ROLE;
}

View File

@ -491,18 +491,17 @@ GenerateRoleOptionsList(HeapTuple tuple)
options = lappend(options, makeDefElem("password", NULL, -1));
}
/* load valid unitl data from the heap tuple, use default of infinity if not set */
/* load valid until data from the heap tuple */
Datum rolValidUntilDatum = SysCacheGetAttr(AUTHNAME, tuple,
Anum_pg_authid_rolvaliduntil, &isNull);
char *rolValidUntil = "infinity";
if (!isNull)
{
rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum));
}
char *rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum));
Node *validUntilStringNode = (Node *) makeString(rolValidUntil);
DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1);
options = lappend(options, validUntilOption);
Node *validUntilStringNode = (Node *) makeString(rolValidUntil);
DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1);
options = lappend(options, validUntilOption);
}
return options;
}
@ -886,6 +885,14 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
{
Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple);
ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*roleAddress, AuthIdRelationId, membership->grantor);
if (!IsAnyObjectDistributed(list_make1(roleAddress)))
{
/* we only need to propagate the grant if the grantor is distributed */
continue;
}
GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt);
grantRoleStmt->is_grant = true;
@ -901,7 +908,11 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
granteeRole->rolename = GetUserNameFromId(membership->member, true);
grantRoleStmt->grantee_roles = list_make1(granteeRole);
grantRoleStmt->grantor = NULL;
RoleSpec *grantorRole = makeNode(RoleSpec);
grantorRole->roletype = ROLESPEC_CSTRING;
grantorRole->location = -1;
grantorRole->rolename = GetUserNameFromId(membership->grantor, false);
grantRoleStmt->grantor = grantorRole;
#if PG_VERSION_NUM >= PG_VERSION_16
@ -1241,12 +1252,6 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString,
return NIL;
}
/*
* Postgres don't seem to use the grantor. Even dropping the grantor doesn't
* seem to affect the membership. If this changes, we might need to add grantors
* to the dependency resolution too. For now we just don't propagate it.
*/
stmt->grantor = NULL;
stmt->grantee_roles = distributedGranteeRoles;
char *sql = DeparseTreeNode((Node *) stmt);
stmt->grantee_roles = allGranteeRoles;

View File

@ -29,7 +29,7 @@
List *
PostprocessSecLabelStmt(Node *node, const char *queryString)
{
if (!ShouldPropagate())
if (!EnableAlterRolePropagation || !ShouldPropagate())
{
return NIL;
}
@ -59,21 +59,17 @@ PostprocessSecLabelStmt(Node *node, const char *queryString)
return NIL;
}
if (!EnableCreateRolePropagation)
{
return NIL;
}
EnsureCoordinator();
EnsurePropagationToCoordinator();
EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);
const char *sql = DeparseTreeNode((Node *) secLabelStmt);
const char *secLabelCommands = DeparseTreeNode((Node *) secLabelStmt);
List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
(void *) sql,
(void *) secLabelCommands,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
return NodeDDLTaskList(REMOTE_NODES, commandList);
}

View File

@ -14,6 +14,7 @@
#include "access/xact.h"
#include "catalog/dependency.h"
#include "catalog/namespace.h"
#include "catalog/pg_attrdef.h"
#include "commands/defrem.h"
#include "commands/extension.h"
#include "nodes/makefuncs.h"
@ -507,22 +508,14 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString,
static Oid
SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress, char depType)
{
List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE);
Oid citusTableId = InvalidOid;
foreach_oid(citusTableId, citusTableIdList)
Oid relationId;
List *relations = GetDependentRelationsWithSequence(sequenceAddress->objectId,
depType);
foreach_oid(relationId, relations)
{
List *seqInfoList = NIL;
GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0, depType);
SequenceInfo *seqInfo = NULL;
foreach_ptr(seqInfo, seqInfoList)
if (IsCitusTable(relationId))
{
/*
* This sequence is used in a distributed table
*/
if (seqInfo->sequenceOid == sequenceAddress->objectId)
{
return citusTableId;
}
return relationId;
}
}

View File

@ -3053,11 +3053,15 @@ ErrorUnsupportedAlterTableAddColumn(Oid relationId, AlterTableCmd *command,
else if (constraint->contype == CONSTR_FOREIGN)
{
RangeVar *referencedTable = constraint->pktable;
char *referencedColumn = strVal(lfirst(list_head(constraint->pk_attrs)));
Oid referencedRelationId = RangeVarGetRelid(referencedTable, NoLock, false);
appendStringInfo(errHint, "FOREIGN KEY (%s) REFERENCES %s(%s)", colName,
get_rel_name(referencedRelationId), referencedColumn);
appendStringInfo(errHint, "FOREIGN KEY (%s) REFERENCES %s", colName,
get_rel_name(referencedRelationId));
if (list_length(constraint->pk_attrs) > 0)
{
AppendColumnNameList(errHint, constraint->pk_attrs);
}
if (constraint->fk_del_action == FKCONSTR_ACTION_SETNULL)
{

View File

@ -87,14 +87,6 @@
#include "distributed/worker_shard_visibility.h"
#include "distributed/worker_transaction.h"
#define EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER \
"SELECT citus_internal.execute_command_on_remote_nodes_as_user(%s, %s)"
#define START_MANAGEMENT_TRANSACTION \
"SELECT citus_internal.start_management_transaction('%lu')"
#define MARK_OBJECT_DISTRIBUTED \
"SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)"
bool EnableDDLPropagation = true; /* ddl propagation is enabled */
int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE;
PropSetCmdBehavior PropagateSetCommands = PROPSETCMD_NONE; /* SET prop off */
@ -122,8 +114,7 @@ static void PostStandardProcessUtility(Node *parsetree);
static void DecrementUtilityHookCountersIfNecessary(Node *parsetree);
static bool IsDropSchemaOrDB(Node *parsetree);
static bool ShouldCheckUndistributeCitusLocalTables(void);
static void RunPreprocessMainDBCommand(Node *parsetree, const char *queryString);
static void RunPostprocessMainDBCommand(Node *parsetree);
/*
* ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of
@ -255,23 +246,27 @@ citus_ProcessUtility(PlannedStmt *pstmt,
if (!CitusHasBeenLoaded())
{
if (!IsMainDB)
{
RunPreprocessMainDBCommand(parsetree, queryString);
}
/*
* Ensure that utility commands do not behave any differently until CREATE
* EXTENSION is invoked.
* Process the command via RunPreprocessNonMainDBCommand and
* RunPostprocessNonMainDBCommand hooks if we're in a non-main database
* and if the command is a node-wide object management command that we
* support from non-main databases.
*/
PrevProcessUtility(pstmt, queryString, false, context,
params, queryEnv, dest, completionTag);
if (!IsMainDB)
bool shouldSkipPrevUtilityHook = RunPreprocessNonMainDBCommand(parsetree);
if (!shouldSkipPrevUtilityHook)
{
RunPostprocessMainDBCommand(parsetree);
/*
* Ensure that utility commands do not behave any differently until CREATE
* EXTENSION is invoked.
*/
PrevProcessUtility(pstmt, queryString, false, context,
params, queryEnv, dest, completionTag);
}
RunPostprocessNonMainDBCommand(parsetree);
return;
}
else if (IsA(parsetree, CallStmt))
@ -738,6 +733,13 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
errhint("Connect to other nodes directly to manually create all"
" necessary users and roles.")));
}
else if (IsA(parsetree, SecLabelStmt) && !EnableAlterRolePropagation)
{
ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands to other"
" nodes"),
errhint("Connect to other nodes directly to manually assign"
" necessary labels.")));
}
/*
* Make sure that on DROP EXTENSION we terminate the background daemon
@ -1287,7 +1289,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
errhint("Use DROP INDEX CONCURRENTLY IF EXISTS to remove the "
"invalid index, then retry the original command.")));
}
else
else if (ddlJob->warnForPartialFailure)
{
ereport(WARNING,
(errmsg(
@ -1296,9 +1298,9 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
"state.\nIf the problematic command is a CREATE operation, "
"consider using the 'IF EXISTS' syntax to drop the object,"
"\nif applicable, and then re-attempt the original command.")));
PG_RE_THROW();
}
PG_RE_THROW();
}
PG_END_TRY();
}
@ -1514,9 +1516,12 @@ DDLTaskList(Oid relationId, const char *commandString)
* NontransactionalNodeDDLTaskList builds a list of tasks to execute a DDL command on a
* given target set of nodes with cannotBeExecutedInTransaction set to make sure
* that the task list is executed outside a transaction block.
*
* Also sets warnForPartialFailure for the returned DDLJobs.
*/
List *
NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands)
NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands,
bool warnForPartialFailure)
{
List *ddlJobs = NodeDDLTaskList(targets, commands);
DDLJob *ddlJob = NULL;
@ -1527,6 +1532,8 @@ NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands)
{
task->cannotBeExecutedInTransaction = true;
}
ddlJob->warnForPartialFailure = warnForPartialFailure;
}
return ddlJobs;
}
@ -1594,50 +1601,3 @@ DropSchemaOrDBInProgress(void)
{
return activeDropSchemaOrDBs > 0;
}
/*
* RunPreprocessMainDBCommand runs the necessary commands for a query, in main
* database before query is run on the local node with PrevProcessUtility
*/
static void
RunPreprocessMainDBCommand(Node *parsetree, const char *queryString)
{
if (IsA(parsetree, CreateRoleStmt))
{
StringInfo mainDBQuery = makeStringInfo();
appendStringInfo(mainDBQuery,
START_MANAGEMENT_TRANSACTION,
GetCurrentFullTransactionId().value);
RunCitusMainDBQuery(mainDBQuery->data);
mainDBQuery = makeStringInfo();
appendStringInfo(mainDBQuery,
EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER,
quote_literal_cstr(queryString),
quote_literal_cstr(CurrentUserName()));
RunCitusMainDBQuery(mainDBQuery->data);
}
}
/*
* RunPostprocessMainDBCommand runs the necessary commands for a query, in main
* database after query is run on the local node with PrevProcessUtility
*/
static void
RunPostprocessMainDBCommand(Node *parsetree)
{
if (IsA(parsetree, CreateRoleStmt))
{
StringInfo mainDBQuery = makeStringInfo();
CreateRoleStmt *createRoleStmt = castNode(CreateRoleStmt, parsetree);
Oid roleOid = get_role_oid(createRoleStmt->role, false);
appendStringInfo(mainDBQuery,
MARK_OBJECT_DISTRIBUTED,
AuthIdRelationId,
quote_literal_cstr(createRoleStmt->role),
roleOid,
quote_literal_cstr(CurrentUserName()));
RunCitusMainDBQuery(mainDBQuery->data);
}
}

View File

@ -271,9 +271,24 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
* We allocate everything in the provided context so as to facilitate using
* pfree on all runtime parameters when connections using these entries are
* invalidated during config reloads.
*
* Also, when "host" is already provided in global parameters, we use hostname
* from the key as "hostaddr" instead of "host" to avoid host name lookup. In
* that case, the value for "host" becomes useful only if the authentication
* method requires it.
*/
bool gotHostParamFromGlobalParams = false;
for (Size paramIndex = 0; paramIndex < ConnParams.size; paramIndex++)
{
if (strcmp(ConnParams.keywords[paramIndex], "host") == 0)
{
gotHostParamFromGlobalParams = true;
break;
}
}
const char *runtimeKeywords[] = {
"host",
gotHostParamFromGlobalParams ? "hostaddr" : "host",
"port",
"dbname",
"user",

View File

@ -883,7 +883,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts)
palloc(totalConnectionCount * sizeof(MultiConnection *));
WaitEvent *events = palloc(totalConnectionCount * sizeof(WaitEvent));
bool *connectionReady = palloc(totalConnectionCount * sizeof(bool));
WaitEventSet *waitEventSet = NULL;
WaitEventSet *volatile waitEventSet = NULL;
/* convert connection list to an array such that we can move items around */
MultiConnection *connectionItem = NULL;

View File

@ -74,7 +74,7 @@ AppendGrantRestrictAndCascade(StringInfo buf, GrantStmt *stmt)
void
AppendGrantedByInGrantForRoleSpec(StringInfo buf, RoleSpec *grantor, bool isGrant)
{
if (isGrant && grantor)
if (grantor)
{
appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(grantor, true));
}
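With the isGrant condition dropped, the GRANTED BY clause is now deparsed for REVOKE statements as well; a sketch with illustrative role names:

REVOKE role_a FROM role_b GRANTED BY role_c;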

View File

@ -277,6 +277,11 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt)
/*
* Make sure that we don't try to deparse something that this
* function doesn't expect.
*
* This is also useful to throw an error for unsupported CREATE
* DATABASE options when the command is issued from non-main dbs
* because we use the same function to deparse CREATE DATABASE
* commands there too.
*/
EnsureSupportedCreateDatabaseCommand(stmt);

View File

@ -488,7 +488,6 @@ AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt)
AppendGrantWithAdminOption(buf, stmt);
AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant);
AppendGrantRestrictAndCascadeForRoleSpec(buf, stmt->behavior, stmt->is_grant);
AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant);
appendStringInfo(buf, ";");
}

View File

@ -121,7 +121,7 @@ AppendAlterTableStmt(StringInfo buf, AlterTableStmt *stmt)
* AppendColumnNameList converts a list of columns into comma separated string format
* (colname_1, colname_2, .., colname_n).
*/
static void
void
AppendColumnNameList(StringInfo buf, List *columns)
{
appendStringInfoString(buf, " (");

View File

@ -295,7 +295,6 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest)
if (resultDest->writeLocalFile)
{
const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
const int fileMode = (S_IRUSR | S_IWUSR);
/* make sure the directory exists */
CreateIntermediateResultsDirectory();
@ -303,8 +302,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest)
const char *fileName = QueryResultFileName(resultId);
resultDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(fileName,
fileFlags,
fileMode));
fileFlags));
}
WorkerNode *workerNode = NULL;
@ -606,7 +604,7 @@ CreateIntermediateResultsDirectory(void)
{
char *resultDirectory = IntermediateResultsDirectory();
int makeOK = mkdir(resultDirectory, S_IRWXU);
int makeOK = MakePGDirectory(resultDirectory);
if (makeOK != 0)
{
if (errno == EEXIST)
@ -976,7 +974,6 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId)
StringInfo copyCommand = makeStringInfo();
const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
const int fileMode = (S_IRUSR | S_IWUSR);
PGconn *pgConn = connection->pgConn;
int socket = PQsocket(pgConn);
@ -998,7 +995,7 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId)
PQclear(result);
File fileDesc = FileOpenForTransmit(localPath, fileFlags, fileMode);
File fileDesc = FileOpenForTransmit(localPath, fileFlags);
FileCompat fileCompat = FileCompatFromFileStart(fileDesc);
while (true)

View File

@ -17,6 +17,7 @@
#include "pgstat.h"
#include "commands/defrem.h"
#include "common/file_perm.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "storage/fd.h"
@ -48,8 +49,7 @@ RedirectCopyDataToRegularFile(const char *filename)
{
StringInfo copyData = makeStringInfo();
const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
const int fileMode = (S_IRUSR | S_IWUSR);
File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
File fileDesc = FileOpenForTransmit(filename, fileFlags);
FileCompat fileCompat = FileCompatFromFileStart(fileDesc);
SendCopyInStart();
@ -92,7 +92,7 @@ SendRegularFile(const char *filename)
const int fileMode = 0;
/* we currently do not check if the caller has permissions for this file */
File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
File fileDesc = FileOpenForTransmitPerm(filename, fileFlags, fileMode);
FileCompat fileCompat = FileCompatFromFileStart(fileDesc);
/*
@ -136,12 +136,23 @@ FreeStringInfo(StringInfo stringInfo)
/*
* FileOpenForTransmit opens file with the given filename and flags. On success,
* the function returns the internal file handle for the opened file. On failure
* the function errors out.
* FileOpenForTransmit opens a file with FileOpenForTransmitPerm(), passing
* the default file mode for the fileMode parameter.
*/
File
FileOpenForTransmit(const char *filename, int fileFlags, int fileMode)
FileOpenForTransmit(const char *filename, int fileFlags)
{
return FileOpenForTransmitPerm(filename, fileFlags, pg_file_create_mode);
}
/*
* FileOpenForTransmitPerm opens file with the given filename and flags. On
* success, the function returns the internal file handle for the opened file.
* On failure the function errors out.
*/
File
FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode)
{
struct stat fileStat;

View File

@ -98,10 +98,10 @@ mark_object_distributed(PG_FUNCTION_ARGS)
/*
* citus_unmark_object_distributed(classid oid, objid oid, objsubid int)
* citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence bool)
*
* removes the entry for an object address from pg_dist_object. Only removes the entry if
* the object does not exist anymore.
* Removes the entry for an object address from pg_dist_object. If checkobjectexistence is true,
* throws an error if the object still exists.
*/
Datum
citus_unmark_object_distributed(PG_FUNCTION_ARGS)
@ -109,6 +109,12 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS)
Oid classid = PG_GETARG_OID(0);
Oid objid = PG_GETARG_OID(1);
int32 objsubid = PG_GETARG_INT32(2);
bool checkObjectExistence = true;
if (!PG_ARGISNULL(3))
{
checkObjectExistence = PG_GETARG_BOOL(3);
}
ObjectAddress address = { 0 };
ObjectAddressSubSet(address, classid, objid, objsubid);
@ -119,7 +125,7 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
if (ObjectExists(&address))
if (checkObjectExistence && ObjectExists(&address))
{
ereport(ERROR, (errmsg("object still exists"),
errdetail("the %s \"%s\" still exists",

View File

@ -2522,6 +2522,8 @@ AvailableExtensionVersion(void)
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension is not found")));
return NULL; /* keep compiler happy */
}

View File

@ -492,19 +492,7 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS)
bool
ClusterHasKnownMetadataWorkers()
{
bool workerWithMetadata = false;
if (!IsCoordinator())
{
workerWithMetadata = true;
}
if (workerWithMetadata || HasMetadataWorkers())
{
return true;
}
return false;
return !IsCoordinator() || HasMetadataWorkers();
}
@ -1176,7 +1164,7 @@ DistributionDeleteMetadataCommand(Oid relationId)
char *qualifiedRelationName = generate_qualified_relation_name(relationId);
appendStringInfo(deleteCommand,
"SELECT pg_catalog.citus_internal_delete_partition_metadata(%s)",
"SELECT citus_internal.delete_partition_metadata(%s)",
quote_literal_cstr(qualifiedRelationName));
return deleteCommand->data;
@ -1259,7 +1247,7 @@ ShardListInsertCommand(List *shardIntervalList)
appendStringInfo(insertPlacementCommand, ") ");
appendStringInfo(insertPlacementCommand,
"SELECT citus_internal_add_placement_metadata("
"SELECT citus_internal.add_placement_metadata("
"shardid, shardlength, groupid, placementid) "
"FROM placement_data;");
@ -1315,7 +1303,7 @@ ShardListInsertCommand(List *shardIntervalList)
appendStringInfo(insertShardCommand, ") ");
appendStringInfo(insertShardCommand,
"SELECT citus_internal_add_shard_metadata(relationname, shardid, "
"SELECT citus_internal.add_shard_metadata(relationname, shardid, "
"storagetype, shardminvalue, shardmaxvalue) "
"FROM shard_data;");
@ -1354,7 +1342,7 @@ ShardDeleteCommandList(ShardInterval *shardInterval)
StringInfo deleteShardCommand = makeStringInfo();
appendStringInfo(deleteShardCommand,
"SELECT citus_internal_delete_shard_metadata(%ld);", shardId);
"SELECT citus_internal.delete_shard_metadata(%ld);", shardId);
return list_make1(deleteShardCommand->data);
}
@ -1424,7 +1412,7 @@ ColocationIdUpdateCommand(Oid relationId, uint32 colocationId)
StringInfo command = makeStringInfo();
char *qualifiedRelationName = generate_qualified_relation_name(relationId);
appendStringInfo(command,
"SELECT citus_internal_update_relation_colocation(%s::regclass, %d)",
"SELECT citus_internal.update_relation_colocation(%s::regclass, %d)",
quote_literal_cstr(qualifiedRelationName), colocationId);
return command->data;
@ -1649,6 +1637,74 @@ GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
}
/*
* GetDependentRelationsWithSequence returns a list of OIDs of
* relations that have a dependency on the given sequence.
* There are three types of dependencies:
* 1. direct auto (owned sequences), created using SERIAL or BIGSERIAL
* 2. indirect auto (through an AttrDef), created using DEFAULT nextval('..')
* 3. internal, created using GENERATED ALWAYS AS IDENTITY
*
* Depending on the passed deptype, we return the relations that have the
* given type(s):
* - DEPENDENCY_AUTO returns both 1 and 2
* - DEPENDENCY_INTERNAL returns 3
*
* The returned list can contain duplicates, as the same relation can have
* multiple dependencies on the sequence.
*/
List *
GetDependentRelationsWithSequence(Oid sequenceOid, char depType)
{
List *relations = NIL;
ScanKeyData key[2];
HeapTuple tup;
Relation depRel = table_open(DependRelationId, AccessShareLock);
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(RelationRelationId));
ScanKeyInit(&key[1],
Anum_pg_depend_objid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(sequenceOid));
SysScanDesc scan = systable_beginscan(depRel, DependDependerIndexId, true,
NULL, lengthof(key), key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
if (
deprec->refclassid == RelationRelationId &&
deprec->refobjsubid != 0 &&
deprec->deptype == depType)
{
relations = lappend_oid(relations, deprec->refobjid);
}
}
systable_endscan(scan);
table_close(depRel, AccessShareLock);
if (depType == DEPENDENCY_AUTO)
{
Oid attrDefOid;
List *attrDefOids = GetAttrDefsFromSequence(sequenceOid);
foreach_oid(attrDefOid, attrDefOids)
{
ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid);
relations = lappend_oid(relations, columnAddress.objectId);
}
}
return relations;
}
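To make the three dependency shapes concrete, a minimal sketch (table and sequence names assumed):

CREATE TABLE t1 (a bigserial);                            -- 1: direct auto
CREATE SEQUENCE s1;
CREATE TABLE t2 (a bigint DEFAULT nextval('s1'));         -- 2: auto, via a pg_attrdef entry
CREATE TABLE t3 (a bigint GENERATED ALWAYS AS IDENTITY);  -- 3: internal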
/*
* GetSequencesFromAttrDef returns a list of sequence OIDs that have
* a dependency on the given attrdefOid in pg_depend
@ -1694,6 +1750,90 @@ GetSequencesFromAttrDef(Oid attrdefOid)
}
#if PG_VERSION_NUM < PG_VERSION_15
/*
* Given a pg_attrdef OID, return the relation OID and column number of
* the owning column (represented as an ObjectAddress for convenience).
*
* Returns InvalidObjectAddress if there is no such pg_attrdef entry.
*/
ObjectAddress
GetAttrDefaultColumnAddress(Oid attrdefoid)
{
ObjectAddress result = InvalidObjectAddress;
ScanKeyData skey[1];
HeapTuple tup;
Relation attrdef = table_open(AttrDefaultRelationId, AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_attrdef_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(attrdefoid));
SysScanDesc scan = systable_beginscan(attrdef, AttrDefaultOidIndexId, true,
NULL, 1, skey);
if (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_attrdef atdform = (Form_pg_attrdef) GETSTRUCT(tup);
result.classId = RelationRelationId;
result.objectId = atdform->adrelid;
result.objectSubId = atdform->adnum;
}
systable_endscan(scan);
table_close(attrdef, AccessShareLock);
return result;
}
#endif
/*
* GetAttrDefsFromSequence returns a list of attrdef OIDs that have
* a dependency on the given sequence
*/
List *
GetAttrDefsFromSequence(Oid seqOid)
{
List *attrDefsResult = NIL;
ScanKeyData key[2];
HeapTuple tup;
Relation depRel = table_open(DependRelationId, AccessShareLock);
ScanKeyInit(&key[0],
Anum_pg_depend_refclassid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(RelationRelationId));
ScanKeyInit(&key[1],
Anum_pg_depend_refobjid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(seqOid));
SysScanDesc scan = systable_beginscan(depRel, DependReferenceIndexId, true,
NULL, lengthof(key), key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
if (deprec->classid == AttrDefaultRelationId &&
deprec->deptype == DEPENDENCY_NORMAL)
{
attrDefsResult = lappend_oid(attrDefsResult, deprec->objid);
}
}
systable_endscan(scan);
table_close(depRel, AccessShareLock);
return attrDefsResult;
}
/*
* GetDependentFunctionsWithRelation returns the dependent functions for the
* given relation id.
@ -4056,7 +4196,7 @@ citus_internal_database_command(PG_FUNCTION_ARGS)
}
else
{
ereport(ERROR, (errmsg("citus_internal_database_command() can only be used "
ereport(ERROR, (errmsg("citus_internal.database_command() can only be used "
"for CREATE DATABASE command by Citus.")));
}
@ -4209,7 +4349,7 @@ ColocationGroupDeleteCommand(uint32 colocationId)
StringInfo deleteColocationCommand = makeStringInfo();
appendStringInfo(deleteColocationCommand,
"SELECT pg_catalog.citus_internal_delete_colocation_metadata(%d)",
"SELECT citus_internal.delete_colocation_metadata(%d)",
colocationId);
return deleteColocationCommand->data;
@ -4225,7 +4365,7 @@ TenantSchemaInsertCommand(Oid schemaId, uint32 colocationId)
{
StringInfo command = makeStringInfo();
appendStringInfo(command,
"SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)",
"SELECT citus_internal.add_tenant_schema(%s, %u)",
RemoteSchemaIdExpressionById(schemaId), colocationId);
return command->data;
@ -4241,7 +4381,7 @@ TenantSchemaDeleteCommand(char *schemaName)
{
StringInfo command = makeStringInfo();
appendStringInfo(command,
"SELECT pg_catalog.citus_internal_delete_tenant_schema(%s)",
"SELECT citus_internal.delete_tenant_schema(%s)",
RemoteSchemaIdExpressionByName(schemaName));
return command->data;
@ -4258,7 +4398,7 @@ UpdateNoneDistTableMetadataCommand(Oid relationId, char replicationModel,
{
StringInfo command = makeStringInfo();
appendStringInfo(command,
"SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(%s, '%c', %u, %s)",
"SELECT citus_internal.update_none_dist_table_metadata(%s, '%c', %u, %s)",
RemoteTableIdExpression(relationId), replicationModel, colocationId,
autoConverted ? "true" : "false");
@ -4276,7 +4416,7 @@ AddPlacementMetadataCommand(uint64 shardId, uint64 placementId,
{
StringInfo command = makeStringInfo();
appendStringInfo(command,
"SELECT citus_internal_add_placement_metadata(%ld, %ld, %d, %ld)",
"SELECT citus_internal.add_placement_metadata(%ld, %ld, %d, %ld)",
shardId, shardLength, groupId, placementId);
return command->data;
}
@ -4291,7 +4431,7 @@ DeletePlacementMetadataCommand(uint64 placementId)
{
StringInfo command = makeStringInfo();
appendStringInfo(command,
"SELECT pg_catalog.citus_internal_delete_placement_metadata(%ld)",
"SELECT citus_internal.delete_placement_metadata(%ld)",
placementId);
return command->data;
}
@ -4957,7 +5097,7 @@ SendTenantSchemaMetadataCommands(MetadataSyncContext *context)
StringInfo insertTenantSchemaCommand = makeStringInfo();
appendStringInfo(insertTenantSchemaCommand,
"SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)",
"SELECT citus_internal.add_tenant_schema(%s, %u)",
RemoteSchemaIdExpressionById(tenantSchemaForm->schemaid),
tenantSchemaForm->colocationid);

View File

@ -217,6 +217,9 @@ citus_set_coordinator_host(PG_FUNCTION_ARGS)
EnsureTransactionalMetadataSyncMode();
}
/* prevent concurrent modification */
LockRelationOid(DistNodeRelationId(), RowExclusiveLock);
bool isCoordinatorInMetadata = false;
WorkerNode *coordinatorNode = PrimaryNodeForGroup(COORDINATOR_GROUP_ID,
&isCoordinatorInMetadata);
@ -507,7 +510,13 @@ citus_disable_node(PG_FUNCTION_ARGS)
{
text *nodeNameText = PG_GETARG_TEXT_P(0);
int32 nodePort = PG_GETARG_INT32(1);
bool synchronousDisableNode = PG_GETARG_BOOL(2);
bool synchronousDisableNode = true;
Assert(PG_NARGS() == 2 || PG_NARGS() == 3);
if (PG_NARGS() == 3)
{
synchronousDisableNode = PG_GETARG_BOOL(2);
}
char *nodeName = text_to_cstring(nodeNameText);
WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort);
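With the PG_NARGS handling above, both call forms are accepted; a sketch with an assumed worker name:

SELECT pg_catalog.citus_disable_node('worker-1', 5432);         -- synchronous by default
SELECT pg_catalog.citus_disable_node('worker-1', 5432, false);  -- explicitly asynchronous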
@ -1692,7 +1701,7 @@ EnsureParentSessionHasExclusiveLockOnPgDistNode(pid_t parentSessionPid)
if (!parentHasExclusiveLock)
{
ereport(ERROR, (errmsg("lock is not held by the caller. Unexpected caller "
"for citus_internal_mark_node_not_synced")));
"for citus_internal.mark_node_not_synced")));
}
}

View File

@ -426,10 +426,9 @@ ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement,
errdetail("Marking this shard placement for "
"deletion")));
InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
shardRelationName,
shardPlacement->groupId,
CLEANUP_DEFERRED_ON_SUCCESS);
InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
shardRelationName,
shardPlacement->groupId);
return;
}

View File

@ -92,6 +92,8 @@ static bool TryDropReplicationSlotOutsideTransaction(char *replicationSlotName,
char *nodeName,
int nodePort);
static bool TryDropUserOutsideTransaction(char *username, char *nodeName, int nodePort);
static bool TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName,
int nodePort);
static CleanupRecord * GetCleanupRecordByNameAndType(char *objectName,
CleanupObject type);
@ -141,7 +143,6 @@ Datum
citus_cleanup_orphaned_resources(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
EnsureCoordinator();
PreventInTransactionBlock(true, "citus_cleanup_orphaned_resources");
int droppedCount = DropOrphanedResourcesForCleanup();
@ -245,12 +246,6 @@ TryDropOrphanedResources()
static int
DropOrphanedResourcesForCleanup()
{
/* Only runs on Coordinator */
if (!IsCoordinator())
{
return 0;
}
List *cleanupRecordList = ListCleanupRecords();
/*
@ -452,15 +447,15 @@ CompareCleanupRecordsByObjectType(const void *leftElement, const void *rightElem
/*
* InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry
* InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry
* as part of the current transaction. This is primarily useful for deferred drop scenarios,
* since these records would roll back in case of operation failure.
* since these records would roll back in case of operation failure. For the
* same reason, it always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS.
*/
void
InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
char *objectName,
int nodeGroupId,
CleanupPolicy policy)
InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType,
char *objectName,
int nodeGroupId)
{
/* We must have a valid OperationId. Any operation requiring cleanup
* will call RegisterOperationNeedingCleanup.
@ -482,7 +477,8 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
values[Anum_pg_dist_cleanup_object_type - 1] = Int32GetDatum(objectType);
values[Anum_pg_dist_cleanup_object_name - 1] = CStringGetTextDatum(objectName);
values[Anum_pg_dist_cleanup_node_group_id - 1] = Int32GetDatum(nodeGroupId);
values[Anum_pg_dist_cleanup_policy_type - 1] = Int32GetDatum(policy);
values[Anum_pg_dist_cleanup_policy_type - 1] =
Int32GetDatum(CLEANUP_DEFERRED_ON_SUCCESS);
/* open cleanup relation and insert new tuple */
Oid relationId = DistCleanupRelationId();
@ -499,23 +495,27 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
/*
* InsertCleanupRecordInSubtransaction inserts a new pg_dist_cleanup entry in a
* InsertCleanupRecordOutsideTransaction inserts a new pg_dist_cleanup entry in a
* separate transaction to ensure the record persists after rollback. We should
* delete these records if the operation completes successfully.
*
* For failure scenarios, use a subtransaction (direct insert via localhost).
* This is used in scenarios where we need to clean up resources on operation
* completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE).
*/
void
InsertCleanupRecordInSubtransaction(CleanupObject objectType,
char *objectName,
int nodeGroupId,
CleanupPolicy policy)
InsertCleanupRecordOutsideTransaction(CleanupObject objectType,
char *objectName,
int nodeGroupId,
CleanupPolicy policy)
{
/* We must have a valid OperationId. Any operation requiring cleanup
* will call RegisterOperationNeedingCleanup.
*/
Assert(CurrentOperationId != INVALID_OPERATION_ID);
/* assert the circumstance noted in function comment */
Assert(policy == CLEANUP_ALWAYS || policy == CLEANUP_ON_FAILURE);
StringInfo sequenceName = makeStringInfo();
appendStringInfo(sequenceName, "%s.%s",
PG_CATALOG,
@ -603,6 +603,12 @@ TryDropResourceByCleanupRecordOutsideTransaction(CleanupRecord *record,
return TryDropUserOutsideTransaction(record->objectName, nodeName, nodePort);
}
case CLEANUP_OBJECT_DATABASE:
{
return TryDropDatabaseOutsideTransaction(record->objectName, nodeName,
nodePort);
}
default:
{
ereport(WARNING, (errmsg(
@ -883,6 +889,69 @@ TryDropUserOutsideTransaction(char *username,
}
/*
* TryDropDatabaseOutsideTransaction drops the database with the given name
* if it exists.
*/
static bool
TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, int nodePort)
{
int connectionFlags = (OUTSIDE_TRANSACTION | FORCE_NEW_CONNECTION);
MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlags,
nodeName, nodePort,
CitusExtensionOwnerName(),
NULL);
if (PQstatus(connection->pgConn) != CONNECTION_OK)
{
return false;
}
/*
* We want to disable DDL propagation and set lock_timeout before issuing
* the DROP DATABASE command, but we cannot do so in a way that's scoped
* to the DROP DATABASE command. This is because we cannot use a
* transaction block for the DROP DATABASE command.
*
* For this reason, to avoid leaking the lock_timeout and DDL propagation
* settings to future commands, we force the connection to close at the end
* of the transaction.
*/
ForceConnectionCloseAtTransactionEnd(connection);
/*
* The DROP DATABASE command should not propagate, so we disable DDL
* propagation.
*/
List *commandList = list_make3(
"SET lock_timeout TO '1s'",
"SET citus.enable_ddl_propagation TO OFF;",
psprintf("DROP DATABASE IF EXISTS %s;", quote_identifier(databaseName))
);
bool executeCommand = true;
const char *commandString = NULL;
foreach_ptr(commandString, commandList)
{
/*
* Cannot use SendOptionalCommandListToWorkerOutsideTransactionWithConnection()
* because we don't want to open a transaction block on remote nodes as DROP
* DATABASE commands cannot be run inside a transaction block.
*/
if (ExecuteOptionalRemoteCommand(connection, commandString, NULL) !=
RESPONSE_OKAY)
{
executeCommand = false;
break;
}
}
CloseConnection(connection);
return executeCommand;
}
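The per-record command sequence sent over that throwaway connection boils down to the following (database name illustrative):

SET lock_timeout TO '1s';
SET citus.enable_ddl_propagation TO OFF;
DROP DATABASE IF EXISTS leftover_db;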
/*
* ErrorIfCleanupRecordForShardExists errors out if a cleanup record for the given
* shard name exists.

View File

@ -384,6 +384,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state)
Assert(shardCost->cost <= prevShardCost->cost);
}
totalCost += shardCost->cost;
prevShardCost = shardCost;
}
/* Check that utilization field is up to date. */

View File

@ -733,11 +733,11 @@ CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList,
workerPlacementNode->workerPort)));
}
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(
shardInterval),
workerPlacementNode->groupId,
CLEANUP_ON_FAILURE);
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(
shardInterval),
workerPlacementNode->groupId,
CLEANUP_ON_FAILURE);
/* Create new split child shard on the specified placement list */
CreateObjectOnPlacement(splitShardCreationCommandList,
@ -1314,7 +1314,7 @@ DropShardListMetadata(List *shardIntervalList)
{
ListCell *commandCell = NULL;
/* send the commands one by one (calls citus_internal_delete_shard_metadata internally) */
/* send the commands one by one (calls citus_internal.delete_shard_metadata internally) */
List *shardMetadataDeleteCommandList = ShardDeleteCommandList(shardInterval);
foreach(commandCell, shardMetadataDeleteCommandList)
{
@ -1717,11 +1717,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList,
/* Log shard in pg_dist_cleanup. Given that dummy shards are transient resources,
* we want to clean up irrespective of operation success or failure.
*/
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(
shardInterval),
workerPlacementNode->groupId,
CLEANUP_ALWAYS);
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(
shardInterval),
workerPlacementNode->groupId,
CLEANUP_ALWAYS);
/* Create dummy source shard on the specified placement list */
CreateObjectOnPlacement(splitShardCreationCommandList,
@ -1780,11 +1780,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList,
/* Log shard in pg_dist_cleanup. Given that dummy shards are transient resources,
* we want to clean up irrespective of operation success or failure.
*/
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(
shardInterval),
sourceWorkerNode->groupId,
CLEANUP_ALWAYS);
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(
shardInterval),
sourceWorkerNode->groupId,
CLEANUP_ALWAYS);
/* Create dummy split child shard on source worker node */
CreateObjectOnPlacement(splitShardCreationCommandList, sourceWorkerNode);

View File

@ -294,6 +294,17 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
CheckCitusVersion(ERROR);
EnsureCoordinator();
List *referenceTableIdList = NIL;
if (HasNodesWithMissingReferenceTables(&referenceTableIdList))
{
ereport(ERROR, (errmsg("there are missing reference tables on some nodes"),
errhint("Copy reference tables first with "
"replicate_reference_tables() or use "
"citus_rebalance_start() that will do it automatically."
)));
}
int64 shardId = PG_GETARG_INT64(0);
char *sourceNodeName = text_to_cstring(PG_GETARG_TEXT_P(1));
int32 sourceNodePort = PG_GETARG_INT32(2);
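When the new check fires, reference tables have to be copied before retrying the move; a hedged sketch (shard id and node names made up):

SELECT replicate_reference_tables();
SELECT citus_move_shard_placement(102008, 'worker-1', 5432, 'worker-2', 5432);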
@ -593,10 +604,10 @@ InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList)
* We also log cleanup record in the current transaction. If the current transaction rolls back,
* we do not generate a record at all.
*/
InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
qualifiedShardName,
placement->groupId,
CLEANUP_DEFERRED_ON_SUCCESS);
InsertCleanupOnSuccessRecordInCurrentTransaction(
CLEANUP_OBJECT_SHARD_PLACEMENT,
qualifiedShardName,
placement->groupId);
}
}
}
@ -623,10 +634,9 @@ InsertCleanupRecordsForShardPlacementsOnNode(List *shardIntervalList,
* We also log cleanup record in the current transaction. If the current transaction rolls back,
* we do not generate a record at all.
*/
InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
qualifiedShardName,
groupId,
CLEANUP_DEFERRED_ON_SUCCESS);
InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
qualifiedShardName,
groupId);
}
}
@ -1382,10 +1392,11 @@ CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeNa
char *tableOwner = TableOwner(shardInterval->relationId);
/* drop the shard we created on the target, in case of failure */
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(shardInterval),
GroupForNode(targetNodeName, targetNodePort),
CLEANUP_ON_FAILURE);
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(shardInterval),
GroupForNode(targetNodeName,
targetNodePort),
CLEANUP_ON_FAILURE);
SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort,
tableOwner,
@ -1455,10 +1466,11 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName,
char *tableOwner = TableOwner(shardInterval->relationId);
/* drop the shard we created on the target, in case of failure */
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(shardInterval),
GroupForNode(targetNodeName, targetNodePort),
CLEANUP_ON_FAILURE);
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
ConstructQualifiedShardName(shardInterval),
GroupForNode(targetNodeName,
targetNodePort),
CLEANUP_ON_FAILURE);
SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort,
tableOwner, ddlCommandList);
@ -2035,7 +2047,7 @@ UpdateColocatedShardPlacementMetadataOnWorkers(int64 shardId,
StringInfo updateCommand = makeStringInfo();
appendStringInfo(updateCommand,
"SELECT citus_internal_update_placement_metadata(%ld, %d, %d)",
"SELECT citus_internal.update_placement_metadata(%ld, %d, %d)",
colocatedShard->shardId,
sourceGroupId, targetGroupId);
SendCommandToWorkersWithMetadata(updateCommand->data);

View File

@ -91,6 +91,10 @@ bool InDelegatedFunctionCall = false;
static bool
contain_param_walker(Node *node, void *context)
{
if (node == NULL)
{
return false;
}
if (IsA(node, Param))
{
Param *paramNode = (Param *) node;

View File

@ -1810,6 +1810,8 @@ CastExpr(Expr *expr, Oid sourceType, Oid targetType, Oid targetCollation,
ereport(ERROR, (errmsg("could not find a conversion path from type %d to %d",
sourceType, targetType)));
}
return NULL; /* keep compiler happy */
}

View File

@ -182,14 +182,6 @@ CreateRouterMergePlan(Oid targetRelationId, Query *originalQuery, Query *query,
return distributedPlan;
}
Var *insertVar =
FetchAndValidateInsertVarIfExists(targetRelationId, originalQuery);
if (insertVar &&
!IsDistributionColumnInMergeSource((Expr *) insertVar, originalQuery, true))
{
ereport(ERROR, (errmsg("MERGE INSERT must use the source table "
"distribution column value")));
}
Job *job = RouterJob(originalQuery, plannerRestrictionContext,
&distributedPlan->planningError);
@ -251,14 +243,27 @@ CreateNonPushableMergePlan(Oid targetRelationId, uint64 planId, Query *originalQ
CitusTableCacheEntry *targetRelation = GetCitusTableCacheEntry(targetRelationId);
/*
* Get the index of the column in the source query that will be utilized
* to repartition the source rows, ensuring colocation with the target
*/
distributedPlan->sourceResultRepartitionColumnIndex =
SourceResultPartitionColumnIndex(mergeQuery,
sourceQuery->targetList,
targetRelation);
if (IsCitusTableType(targetRelation->relationId, SINGLE_SHARD_DISTRIBUTED))
{
/*
* If the target table is SINGLE_SHARD_DISTRIBUTED, set this to an invalid
* value (-1) so that the execution phase doesn't rely on it and instead
* looks up the single shard of the target.
*/
distributedPlan->sourceResultRepartitionColumnIndex = -1;
}
else
{
/*
* Get the index of the column in the source query that will be utilized
* to repartition the source rows, ensuring colocation with the target
*/
distributedPlan->sourceResultRepartitionColumnIndex =
SourceResultPartitionColumnIndex(mergeQuery,
sourceQuery->targetList,
targetRelation);
}
/*
* Make a copy of the source query, since following code scribbles it
@ -270,11 +275,11 @@ CreateNonPushableMergePlan(Oid targetRelationId, uint64 planId, Query *originalQ
int cursorOptions = CURSOR_OPT_PARALLEL_OK;
PlannedStmt *sourceRowsPlan = pg_plan_query(sourceQueryCopy, NULL, cursorOptions,
boundParams);
bool repartitioned = IsRedistributablePlan(sourceRowsPlan->planTree) &&
IsSupportedRedistributionTarget(targetRelationId);
bool isRepartitionAllowed = IsRedistributablePlan(sourceRowsPlan->planTree) &&
IsSupportedRedistributionTarget(targetRelationId);
/* If plan is distributed, no work at the coordinator */
if (repartitioned)
if (isRepartitionAllowed)
{
distributedPlan->modifyWithSelectMethod = MODIFY_WITH_SELECT_REPARTITION;
}
@ -853,7 +858,7 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
newRangeTableRef->rtindex = SINGLE_RTE_INDEX;
sourceResultsQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
sourceResultsQuery->targetList =
CreateAllTargetListForRelation(sourceRte->relid, requiredAttributes);
CreateFilteredTargetListForRelation(sourceRte->relid, requiredAttributes);
List *restrictionList =
GetRestrictInfoListForRelation(sourceRte, plannerRestrictionContext);
List *copyRestrictionList = copyObject(restrictionList);
@ -1124,6 +1129,27 @@ DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList,
"repartitioning")));
return deferredError;
}
/*
* If execution has reached this point, it indicates that the query can be delegated to the worker.
* However, before proceeding with this delegation, we need to confirm that the user is utilizing
* the distribution column of the source table in the Insert variable.
* If this is not the case, we should refrain from pushing down the query.
* This is just a deferred error which will be handled by the caller.
*/
Var *insertVar =
FetchAndValidateInsertVarIfExists(targetRelationId, query);
if (insertVar &&
!IsDistributionColumnInMergeSource((Expr *) insertVar, query, true))
{
ereport(DEBUG1, (errmsg(
"MERGE INSERT must use the source table distribution column value for push down to workers. Otherwise, repartitioning will be applied")));
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"MERGE INSERT must use the source table distribution column value for push down to workers. Otherwise, repartitioning will be applied",
NULL, NULL);
}
return NULL;
}
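Since this is now a deferred error rather than a hard one, such a MERGE falls back to the repartitioning path instead of failing outright; a sketch with assumed table and column names, where the INSERT does not use the source's distribution column:

MERGE INTO target_dist t
USING source_dist s ON t.id = s.id
WHEN NOT MATCHED THEN
    INSERT (id, val) VALUES (s.other_col, s.val);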
@ -1260,13 +1286,6 @@ static int
SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
CitusTableCacheEntry *targetRelation)
{
if (IsCitusTableType(targetRelation->relationId, SINGLE_SHARD_DISTRIBUTED))
{
ereport(ERROR, (errmsg("MERGE operation across distributed schemas "
"or with a row-based distributed table is "
"not yet supported")));
}
/* Get all the Join conditions from the ON clause */
List *mergeJoinConditionList = WhereClauseList(mergeQuery->jointree);
Var *targetColumn = targetRelation->partitionColumn;

View File

@ -190,6 +190,14 @@ PG_FUNCTION_INFO_V1(worker_save_query_explain_analyze);
void
CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es)
{
#if PG_VERSION_NUM >= PG_VERSION_16
if (es->generic)
{
ereport(ERROR, (errmsg(
"EXPLAIN GENERIC_PLAN is currently not supported for Citus tables")));
}
#endif
CitusScanState *scanState = (CitusScanState *) node;
DistributedPlan *distributedPlan = scanState->distributedPlan;
EState *executorState = ScanStateGetExecutorState(scanState);
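A hedged example of what the new guard rejects on PG16+ (distributed table name assumed):

EXPLAIN (GENERIC_PLAN) SELECT * FROM dist_table WHERE a = $1;
-- ERROR:  EXPLAIN GENERIC_PLAN is currently not supported for Citus tables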
@ -992,18 +1000,12 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es)
appendStringInfo(explainQuery,
"EXPLAIN (ANALYZE %s, VERBOSE %s, "
"COSTS %s, BUFFERS %s, WAL %s, "
#if PG_VERSION_NUM >= PG_VERSION_16
"GENERIC_PLAN %s, "
#endif
"TIMING %s, SUMMARY %s, FORMAT %s) %s",
es->analyze ? "TRUE" : "FALSE",
es->verbose ? "TRUE" : "FALSE",
es->costs ? "TRUE" : "FALSE",
es->buffers ? "TRUE" : "FALSE",
es->wal ? "TRUE" : "FALSE",
#if PG_VERSION_NUM >= PG_VERSION_16
es->generic ? "TRUE" : "FALSE",
#endif
es->timing ? "TRUE" : "FALSE",
es->summary ? "TRUE" : "FALSE",
formatStr,

View File

@ -1557,9 +1557,10 @@ MasterAggregateMutator(Node *originalNode, MasterAggregateWalkerContext *walkerC
}
else if (IsA(originalNode, Var))
{
Var *newColumn = copyObject((Var *) originalNode);
newColumn->varno = masterTableId;
newColumn->varattno = walkerContext->columnId;
Var *origColumn = (Var *) originalNode;
Var *newColumn = makeVar(masterTableId, walkerContext->columnId,
origColumn->vartype, origColumn->vartypmod,
origColumn->varcollid, origColumn->varlevelsup);
walkerContext->columnId++;
newNode = (Node *) newColumn;
@ -4753,22 +4754,35 @@ WorkerLimitCount(Node *limitCount, Node *limitOffset, OrderByLimitReference
if (workerLimitNode != NULL && limitOffset != NULL)
{
Const *workerLimitConst = (Const *) workerLimitNode;
Const *workerOffsetConst = (Const *) limitOffset;
int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);
int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue);
workerLimitCount = workerLimitCount + workerOffsetCount;
workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount);
/* Only update the worker limit if the const is not null. */
if (!workerLimitConst->constisnull)
{
Const *workerOffsetConst = (Const *) limitOffset;
int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);
/* If the offset is null, it defaults to 0 when cast to int64. */
int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue);
workerLimitCount = workerLimitCount + workerOffsetCount;
workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount);
}
}
/* display debug message on limit push down */
if (workerLimitNode != NULL)
{
Const *workerLimitConst = (Const *) workerLimitNode;
int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);
if (!workerLimitConst->constisnull)
{
int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue);
ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT,
workerLimitCount)));
ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT,
workerLimitCount)));
}
else
{
ereport(DEBUG1, (errmsg("push down of limit count: ALL")));
}
}
return workerLimitNode;
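With the null-const handling above, a query like the following (table name assumed) keeps LIMIT ALL in the worker query instead of combining a null limit with the offset:

SELECT * FROM dist_table ORDER BY a LIMIT ALL OFFSET 10;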

View File

@ -45,8 +45,6 @@
static RangeTblEntry * AnchorRte(Query *subquery);
static List * UnionRelationRestrictionLists(List *firstRelationList,
List *secondRelationList);
static List * CreateFilteredTargetListForRelation(Oid relationId,
List *requiredAttributes);
static List * CreateDummyTargetList(Oid relationId, List *requiredAttributes);
static TargetEntry * CreateTargetEntryForColumn(Form_pg_attribute attributeTuple, Index
rteIndex,
@ -378,7 +376,7 @@ CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes)
* only the required columns of the given relation. If there is not required
* columns then a dummy NULL column is put as the only entry.
*/
static List *
List *
CreateFilteredTargetListForRelation(Oid relationId, List *requiredAttributes)
{
Relation relation = relation_open(relationId, AccessShareLock);

View File

@ -1335,10 +1335,10 @@ CreatePublications(MultiConnection *connection,
WorkerNode *worker = FindWorkerNode(connection->hostname,
connection->port);
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_PUBLICATION,
entry->name,
worker->groupId,
CLEANUP_ALWAYS);
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_PUBLICATION,
entry->name,
worker->groupId,
CLEANUP_ALWAYS);
ExecuteCriticalRemoteCommand(connection, DISABLE_DDL_PROPAGATION);
ExecuteCriticalRemoteCommand(connection, createPublicationCommand->data);
@ -1435,10 +1435,10 @@ CreateReplicationSlots(MultiConnection *sourceConnection,
WorkerNode *worker = FindWorkerNode(sourceConnection->hostname,
sourceConnection->port);
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_REPLICATION_SLOT,
replicationSlot->name,
worker->groupId,
CLEANUP_ALWAYS);
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_REPLICATION_SLOT,
replicationSlot->name,
worker->groupId,
CLEANUP_ALWAYS);
if (!firstReplicationSlot)
{
@ -1506,10 +1506,10 @@ CreateSubscriptions(MultiConnection *sourceConnection,
quote_identifier(GetUserNameFromId(ownerId, false))
)));
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_USER,
target->subscriptionOwnerName,
worker->groupId,
CLEANUP_ALWAYS);
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_USER,
target->subscriptionOwnerName,
worker->groupId,
CLEANUP_ALWAYS);
StringInfo conninfo = makeStringInfo();
appendStringInfo(conninfo, "host='%s' port=%d user='%s' dbname='%s' "
@ -1567,10 +1567,10 @@ CreateSubscriptions(MultiConnection *sourceConnection,
pfree(createSubscriptionCommand->data);
pfree(createSubscriptionCommand);
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SUBSCRIPTION,
target->subscriptionName,
worker->groupId,
CLEANUP_ALWAYS);
InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SUBSCRIPTION,
target->subscriptionName,
worker->groupId,
CLEANUP_ALWAYS);
ExecuteCriticalRemoteCommand(target->superuserConnection, psprintf(
"ALTER SUBSCRIPTION %s OWNER TO %s",

View File

@ -895,22 +895,13 @@ DecrementExternalClientBackendCounterAtExit(int code, Datum arg)
static void
CreateRequiredDirectories(void)
{
const char *subdirs[] = {
"pg_foreign_file",
"pg_foreign_file/cached",
("base/" PG_JOB_CACHE_DIR)
};
const char *subdir = ("base/" PG_JOB_CACHE_DIR);
for (int dirNo = 0; dirNo < lengthof(subdirs); dirNo++)
if (MakePGDirectory(subdir) != 0 && errno != EEXIST)
{
int ret = mkdir(subdirs[dirNo], S_IRWXU);
if (ret != 0 && errno != EEXIST)
{
ereport(ERROR, (errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
subdirs[dirNo])));
}
ereport(ERROR, (errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
subdir)));
}
}
@ -2899,14 +2890,27 @@ ApplicationNameAssignHook(const char *newval, void *extra)
DetermineCitusBackendType(newval);
/*
* AssignGlobalPID might read from catalog tables to get the the local
* nodeid. But ApplicationNameAssignHook might be called before catalog
* access is available to the backend (such as in early stages of
* authentication). We use StartupCitusBackend to initialize the global pid
* after catalogs are available. After that happens this hook becomes
* responsible to update the global pid on later application_name changes.
* So we set the FinishedStartupCitusBackend flag in StartupCitusBackend to
* indicate when this responsibility handoff has happened.
* We use StartupCitusBackend to initialize the global pid after catalogs
* are available. After that happens this hook becomes responsible to update
* the global pid on later application_name changes. So we set the
* FinishedStartupCitusBackend flag in StartupCitusBackend to indicate when
* this responsibility handoff has happened.
*
* Also note that when application_name changes, we don't actually need to
* try re-assigning the global pid for external client backends and
* background workers because application_name doesn't affect the global
* pid for such backends - note that the !IsExternalClientBackend() check
* covers both types of backends. Plus, trying to re-assign the global pid
* for such backends would unnecessarily perform a catalog access when the
* cached local node id is invalidated. However, accessing the catalog
* tables is dangerous in certain situations, like when we're not in a
* transaction block. And for the other types of backends, i.e., the Citus
* internal backends, we need to re-assign the global pid when the
* application_name changes because for such backends we simply extract the
* global pid inherited from the originating backend from the
* application_name (which the originating backend specifies when opening
* that connection) and this doesn't require catalog access.
*
* Another solution to the catalog table access problem would be to update
* global pid lazily, like we do for HideShards. But that's not possible
@ -2916,7 +2920,7 @@ ApplicationNameAssignHook(const char *newval, void *extra)
* as reasonably possible, which is also why we extract global pids in the
* AuthHook already (extracting doesn't require catalog access).
*/
if (FinishedStartupCitusBackend)
if (FinishedStartupCitusBackend && !IsExternalClientBackend())
{
AssignGlobalPID(newval);
}
@ -2938,6 +2942,7 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source)
#if defined(ENABLE_GSS) && defined(ENABLE_SSPI)
"gsslib",
#endif
"host",
"keepalives",
"keepalives_count",
"keepalives_idle",

View File

@ -7,6 +7,8 @@
#include "udfs/start_management_transaction/12.2-1.sql"
#include "udfs/execute_command_on_remote_nodes_as_user/12.2-1.sql"
#include "udfs/mark_object_distributed/12.2-1.sql"
DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid, oid, int);
#include "udfs/citus_unmark_object_distributed/12.2-1.sql"
#include "udfs/commit_management_command_2pc/12.2-1.sql"
ALTER TABLE pg_catalog.pg_dist_transaction ADD COLUMN outer_xid xid8;
@ -29,3 +31,27 @@ REVOKE ALL ON FUNCTION citus_internal.start_management_transaction FROM PUBLIC;
#include "udfs/citus_internal_add_colocation_metadata/12.2-1.sql"
#include "udfs/citus_internal_add_object_metadata/12.2-1.sql"
#include "udfs/citus_internal_add_partition_metadata/12.2-1.sql"
#include "udfs/citus_internal_add_placement_metadata/12.2-1.sql"
#include "udfs/citus_internal_add_shard_metadata/12.2-1.sql"
#include "udfs/citus_internal_add_tenant_schema/12.2-1.sql"
#include "udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql"
#include "udfs/citus_internal_delete_colocation_metadata/12.2-1.sql"
#include "udfs/citus_internal_delete_partition_metadata/12.2-1.sql"
#include "udfs/citus_internal_delete_placement_metadata/12.2-1.sql"
#include "udfs/citus_internal_delete_shard_metadata/12.2-1.sql"
#include "udfs/citus_internal_delete_tenant_schema/12.2-1.sql"
#include "udfs/citus_internal_local_blocked_processes/12.2-1.sql"
#include "udfs/citus_internal_global_blocked_processes/12.2-1.sql"
#include "udfs/citus_blocking_pids/12.2-1.sql"
#include "udfs/citus_isolation_test_session_is_blocked/12.2-1.sql"
DROP VIEW IF EXISTS pg_catalog.citus_lock_waits;
#include "udfs/citus_lock_waits/12.2-1.sql"
#include "udfs/citus_internal_mark_node_not_synced/12.2-1.sql"
#include "udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql"
#include "udfs/citus_drop_trigger/12.2-1.sql"
#include "udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql"
#include "udfs/citus_internal_update_placement_metadata/12.2-1.sql"
#include "udfs/citus_internal_update_relation_colocation/12.2-1.sql"
#include "udfs/repl_origin_helper/12.2-1.sql"
#include "udfs/citus_finish_pg_upgrade/12.2-1.sql"

View File

@ -1,6 +1,6 @@
-- citus--12.2-1--12.1-1
DROP FUNCTION pg_catalog.citus_internal_database_command(text);
DROP FUNCTION citus_internal.database_command(text);
DROP FUNCTION citus_internal.acquire_citus_advisory_object_class_lock(int, cstring);
#include "../udfs/citus_add_rebalance_strategy/10.1-1.sql"
@ -18,6 +18,9 @@ DROP FUNCTION citus_internal.mark_object_distributed(
classId Oid, objectName text, objectId Oid, connectionUser text
);
DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid,oid,int,boolean);
#include "../udfs/citus_unmark_object_distributed/10.0-1.sql"
DROP FUNCTION citus_internal.commit_management_command_2pc();
ALTER TABLE pg_catalog.pg_dist_transaction DROP COLUMN outer_xid;
@ -26,4 +29,29 @@ REVOKE USAGE ON SCHEMA citus_internal FROM PUBLIC;
DROP FUNCTION citus_internal.add_colocation_metadata(int, int, int, regtype, oid);
DROP FUNCTION citus_internal.add_object_metadata(text, text[], text[], integer, integer, boolean);
DROP FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char");
DROP FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint);
DROP FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text);
DROP FUNCTION citus_internal.add_tenant_schema(oid, integer);
DROP FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock);
DROP FUNCTION citus_internal.delete_colocation_metadata(int);
DROP FUNCTION citus_internal.delete_partition_metadata(regclass);
DROP FUNCTION citus_internal.delete_placement_metadata(bigint);
DROP FUNCTION citus_internal.delete_shard_metadata(bigint);
DROP FUNCTION citus_internal.delete_tenant_schema(oid);
DROP FUNCTION citus_internal.local_blocked_processes();
#include "../udfs/citus_blocking_pids/11.0-1.sql"
#include "../udfs/citus_isolation_test_session_is_blocked/11.1-1.sql"
DROP VIEW IF EXISTS pg_catalog.citus_lock_waits;
#include "../udfs/citus_lock_waits/11.0-1.sql"
DROP FUNCTION citus_internal.global_blocked_processes();
DROP FUNCTION citus_internal.mark_node_not_synced(int, int);
DROP FUNCTION citus_internal.unregister_tenant_schema_globally(oid, text);
#include "../udfs/citus_drop_trigger/12.0-1.sql"
DROP FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean);
DROP FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer);
DROP FUNCTION citus_internal.update_relation_colocation(oid, int);
DROP FUNCTION citus_internal.start_replication_origin_tracking();
DROP FUNCTION citus_internal.stop_replication_origin_tracking();
DROP FUNCTION citus_internal.is_replication_origin_tracking_active();
#include "../udfs/citus_finish_pg_upgrade/12.1-1.sql"


@ -0,0 +1,34 @@
DROP FUNCTION pg_catalog.citus_blocking_pids;
CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer)
RETURNS int4[] AS $$
DECLARE
mLocalBlockingPids int4[];
mRemoteBlockingPids int4[];
mLocalGlobalPid int8;
BEGIN
SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids;
IF (array_length(mLocalBlockingPids, 1) > 0) THEN
RETURN mLocalBlockingPids;
END IF;
-- pg says we're not blocked locally; check whether we're blocked globally.
SELECT global_pid INTO mLocalGlobalPid
FROM get_all_active_transactions() WHERE process_id = pBlockedPid;
SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM (
WITH activeTransactions AS (
SELECT global_pid FROM get_all_active_transactions()
), blockingTransactions AS (
SELECT blocking_global_pid FROM citus_internal.global_blocked_processes()
WHERE waiting_global_pid = mLocalGlobalPid
)
SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions
WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid
) AS sub;
RETURN mRemoteBlockingPids;
END;
$$ LANGUAGE plpgsql;
REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC;
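A minimal usage sketch for the function above (the backend PID is hypothetical):
-- returns the local pg_blocking_pids() answer when the backend is blocked locally,
-- otherwise the global PIDs found via citus_internal.global_blocked_processes()
SELECT pg_catalog.citus_blocking_pids(12345);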


@ -20,7 +20,7 @@ RETURNS int4[] AS $$
WITH activeTransactions AS (
SELECT global_pid FROM get_all_active_transactions()
), blockingTransactions AS (
SELECT blocking_global_pid FROM citus_internal_global_blocked_processes()
SELECT blocking_global_pid FROM citus_internal.global_blocked_processes()
WHERE waiting_global_pid = mLocalGlobalPid
)
SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions


@ -0,0 +1,68 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger()
RETURNS event_trigger
LANGUAGE plpgsql
SET search_path = pg_catalog
AS $cdbdt$
DECLARE
constraint_event_count INTEGER;
v_obj record;
dropped_table_is_a_partition boolean := false;
BEGIN
FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
WHERE object_type IN ('table', 'foreign table')
LOOP
-- first drop the table and metadata on the workers
-- then drop all the shards on the workers
-- finally remove the pg_dist_partition entry on the coordinator
PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name);
-- If both original and normal values are false, the dropped table was a partition
-- that was dropped as a result of its parent being dropped
-- NOTE: the other way around is not true:
-- the table being a partition doesn't imply both original and normal values are false
SELECT (v_obj.original = false AND v_obj.normal = false) INTO dropped_table_is_a_partition;
-- The partition's shards will be dropped when dropping the parent's shards, so we can skip:
-- i.e. we call citus_drop_all_shards with drop_shards_metadata_only parameter set to true
IF dropped_table_is_a_partition
THEN
PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := true);
ELSE
PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false);
END IF;
PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name);
END LOOP;
FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects()
LOOP
-- Remove entries from pg_catalog.pg_dist_schema for all dropped tenant schemas.
-- Also delete the corresponding colocation group from pg_catalog.pg_dist_colocation.
--
-- Although normally we automatically delete the colocation groups when they become empty,
-- we don't do so for the colocation groups that are created for tenant schemas. For this
-- reason, here we need to delete the colocation group when the tenant schema is dropped.
IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid)
THEN
PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name);
END IF;
-- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects
PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid);
END LOOP;
SELECT COUNT(*) INTO constraint_event_count
FROM pg_event_trigger_dropped_objects()
WHERE object_type IN ('table constraint');
IF constraint_event_count > 0
THEN
-- Tell utility hook that a table constraint is dropped so we might
-- need to undistribute some of the citus local tables that are not
-- connected to any reference tables.
PERFORM notify_constraint_dropped();
END IF;
END;
$cdbdt$;
COMMENT ON FUNCTION pg_catalog.citus_drop_trigger()
IS 'perform checks and actions at the end of DROP actions';
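For orientation, a sketch of DDL that exercises this event trigger (object names are hypothetical):
-- dropping a distributed table removes worker-side shards and metadata first
DROP TABLE orders_dist;
-- dropping a tenant schema takes the citus_internal.unregister_tenant_schema_globally path
DROP SCHEMA tenant_42 CASCADE;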


@ -44,7 +44,7 @@ BEGIN
-- reason, here we need to delete the colocation group when the tenant schema is dropped.
IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid)
THEN
PERFORM pg_catalog.citus_internal_unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name);
PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name);
END IF;
-- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects


@ -96,7 +96,7 @@ END;
IF all_nodes_can_connect_to_each_other != True THEN
RAISE EXCEPTION 'There are unhealthy primary nodes, you need to ensure all '
'nodes are up and runnnig. Also, make sure that all nodes can connect '
'nodes are up and running. Also, make sure that all nodes can connect '
'to each other. Use SELECT * FROM citus_check_cluster_node_health(); '
'to check the cluster health';
ELSE


@ -96,7 +96,7 @@ END;
IF all_nodes_can_connect_to_each_other != True THEN
RAISE EXCEPTION 'There are unhealthy primary nodes, you need to ensure all '
'nodes are up and runnnig. Also, make sure that all nodes can connect '
'nodes are up and running. Also, make sure that all nodes can connect '
'to each other. Use SELECT * FROM citus_check_cluster_node_health(); '
'to check the cluster health';
ELSE


@ -96,7 +96,7 @@ END;
IF all_nodes_can_connect_to_each_other != True THEN
RAISE EXCEPTION 'There are unhealthy primary nodes, you need to ensure all '
'nodes are up and runnnig. Also, make sure that all nodes can connect '
'nodes are up and running. Also, make sure that all nodes can connect '
'to each other. Use SELECT * FROM citus_check_cluster_node_health(); '
'to check the cluster health';
ELSE


@ -96,7 +96,7 @@ END;
IF all_nodes_can_connect_to_each_other != True THEN
RAISE EXCEPTION 'There are unhealthy primary nodes, you need to ensure all '
'nodes are up and runnnig. Also, make sure that all nodes can connect '
'nodes are up and running. Also, make sure that all nodes can connect '
'to each other. Use SELECT * FROM citus_check_cluster_node_health(); '
'to check the cluster health';
ELSE


@ -0,0 +1,227 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade()
RETURNS void
LANGUAGE plpgsql
SET search_path = pg_catalog
AS $cppu$
DECLARE
table_name regclass;
command text;
trigger_name text;
BEGIN
IF substring(current_Setting('server_version'), '\d+')::int >= 14 THEN
EXECUTE $cmd$
-- disable propagation to prevent EnsureCoordinator errors
-- the aggregate created here does not depend on Citus extension (yet)
-- since we add the dependency with the next command
SET citus.enable_ddl_propagation TO OFF;
CREATE AGGREGATE array_cat_agg(anycompatiblearray) (SFUNC = array_cat, STYPE = anycompatiblearray);
COMMENT ON AGGREGATE array_cat_agg(anycompatiblearray)
IS 'concatenate input arrays into a single array';
RESET citus.enable_ddl_propagation;
$cmd$;
ELSE
EXECUTE $cmd$
SET citus.enable_ddl_propagation TO OFF;
CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray);
COMMENT ON AGGREGATE array_cat_agg(anyarray)
IS 'concatenate input arrays into a single array';
RESET citus.enable_ddl_propagation;
$cmd$;
END IF;
--
-- Citus creates the array_cat_agg but because of a compatibility
-- issue between pg13-pg14, we drop and create it during upgrade.
-- And as Citus creates it, there needs to be a dependency to the
-- Citus extension, so we create that dependency here.
-- We are not using:
-- ALTER EXTENSION citus DROP/CREATE AGGREGATE array_cat_agg
-- because we don't have an easy way to check if the aggregate
-- exists with anyarray type or anycompatiblearray type.
INSERT INTO pg_depend
SELECT
'pg_proc'::regclass::oid as classid,
(SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') as objid,
0 as objsubid,
'pg_extension'::regclass::oid as refclassid,
(select oid from pg_extension where extname = 'citus') as refobjid,
0 as refobjsubid ,
'e' as deptype;
-- PG16 has its own any_value, so only create it pre PG16.
-- We can remove this part when we drop support for PG16
IF substring(current_Setting('server_version'), '\d+')::int < 16 THEN
EXECUTE $cmd$
-- disable propagation to prevent EnsureCoordinator errors
-- the aggregate created here does not depend on Citus extension (yet)
-- since we add the dependency with the next command
SET citus.enable_ddl_propagation TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.any_value_agg ( anyelement, anyelement )
RETURNS anyelement AS $$
SELECT CASE WHEN $1 IS NULL THEN $2 ELSE $1 END;
$$ LANGUAGE SQL STABLE;
CREATE AGGREGATE pg_catalog.any_value (
sfunc = pg_catalog.any_value_agg,
combinefunc = pg_catalog.any_value_agg,
basetype = anyelement,
stype = anyelement
);
COMMENT ON AGGREGATE pg_catalog.any_value(anyelement) IS
'Returns the value of any row in the group. It is mostly useful when you know there will be only 1 element.';
RESET citus.enable_ddl_propagation;
--
-- Citus creates the any_value aggregate but because of a compatibility
-- issue between pg15-pg16 -- any_value is created in PG16, we drop
-- and create it during upgrade IF upgraded version is less than 16.
-- And as Citus creates it, there needs to be a dependency to the
-- Citus extension, so we create that dependency here.
INSERT INTO pg_depend
SELECT
'pg_proc'::regclass::oid as classid,
(SELECT oid FROM pg_proc WHERE proname = 'any_value_agg') as objid,
0 as objsubid,
'pg_extension'::regclass::oid as refclassid,
(select oid from pg_extension where extname = 'citus') as refobjid,
0 as refobjsubid ,
'e' as deptype;
INSERT INTO pg_depend
SELECT
'pg_proc'::regclass::oid as classid,
(SELECT oid FROM pg_proc WHERE proname = 'any_value') as objid,
0 as objsubid,
'pg_extension'::regclass::oid as refclassid,
(select oid from pg_extension where extname = 'citus') as refobjid,
0 as refobjsubid ,
'e' as deptype;
$cmd$;
END IF;
--
-- restore citus catalog tables
--
INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
-- if we are upgrading from PG14/PG15 to PG16+,
-- we need to regenerate the partkeys because they will include varnullingrels as well.
UPDATE pg_catalog.pg_dist_partition
SET partkey = column_name_to_column(pg_dist_partkeys_pre_16_upgrade.logicalrelid, col_name)
FROM public.pg_dist_partkeys_pre_16_upgrade
WHERE pg_dist_partkeys_pre_16_upgrade.logicalrelid = pg_dist_partition.logicalrelid;
DROP TABLE public.pg_dist_partkeys_pre_16_upgrade;
INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;
INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node;
INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group;
INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction;
INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation;
INSERT INTO pg_catalog.pg_dist_cleanup SELECT * FROM public.pg_dist_cleanup;
INSERT INTO pg_catalog.pg_dist_schema SELECT schemaname::regnamespace, colocationid FROM public.pg_dist_schema;
-- enterprise catalog tables
INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo;
INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo;
-- Temporarily disable trigger to check for validity of functions while
-- inserting. The current contents of the table might be invalid if one of
-- the functions was removed by the user without also removing the
-- rebalance strategy. Obviously that's not great, but it should be no
-- reason to fail the upgrade.
ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger;
INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT
name,
default_strategy,
shard_cost_function::regprocedure::regproc,
node_capacity_function::regprocedure::regproc,
shard_allowed_on_node_function::regprocedure::regproc,
default_threshold,
minimum_threshold,
improvement_threshold
FROM public.pg_dist_rebalance_strategy;
ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger;
--
-- drop backup tables
--
DROP TABLE public.pg_dist_authinfo;
DROP TABLE public.pg_dist_colocation;
DROP TABLE public.pg_dist_local_group;
DROP TABLE public.pg_dist_node;
DROP TABLE public.pg_dist_node_metadata;
DROP TABLE public.pg_dist_partition;
DROP TABLE public.pg_dist_placement;
DROP TABLE public.pg_dist_poolinfo;
DROP TABLE public.pg_dist_shard;
DROP TABLE public.pg_dist_transaction;
DROP TABLE public.pg_dist_rebalance_strategy;
DROP TABLE public.pg_dist_cleanup;
DROP TABLE public.pg_dist_schema;
--
-- reset sequences
--
PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false);
PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false);
PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false);
PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false);
PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false);
PERFORM setval('pg_catalog.pg_dist_operationid_seq', (SELECT MAX(operation_id)+1 AS max_operation_id FROM pg_dist_cleanup), false);
PERFORM setval('pg_catalog.pg_dist_cleanup_recordid_seq', (SELECT MAX(record_id)+1 AS max_record_id FROM pg_dist_cleanup), false);
PERFORM setval('pg_catalog.pg_dist_clock_logical_seq', (SELECT last_value FROM public.pg_dist_clock_logical_seq), false);
DROP TABLE public.pg_dist_clock_logical_seq;
--
-- register triggers
--
FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition JOIN pg_class ON (logicalrelid = oid) WHERE relkind <> 'f'
LOOP
trigger_name := 'truncate_trigger_' || table_name::oid;
command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()';
EXECUTE command;
command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name);
EXECUTE command;
END LOOP;
--
-- set dependencies
--
INSERT INTO pg_depend
SELECT
'pg_class'::regclass::oid as classid,
p.logicalrelid::regclass::oid as objid,
0 as objsubid,
'pg_extension'::regclass::oid as refclassid,
(select oid from pg_extension where extname = 'citus') as refobjid,
0 as refobjsubid ,
'n' as deptype
FROM pg_catalog.pg_dist_partition p;
-- set dependencies for columnar table access method
PERFORM columnar_internal.columnar_ensure_am_depends_catalog();
-- restore pg_dist_object from the stable identifiers
TRUNCATE pg_catalog.pg_dist_object;
INSERT INTO pg_catalog.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid)
SELECT
address.classid,
address.objid,
address.objsubid,
naming.distribution_argument_index,
naming.colocationid
FROM
public.pg_dist_object naming,
pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;
DROP TABLE public.pg_dist_object;
END;
$cppu$;
COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade()
IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade';
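A sketch of the surrounding major-version upgrade flow, in which this function is the final step (assuming the standard citus_prepare_pg_upgrade counterpart staged the public.* backup tables it reads):
-- 1. on the old cluster: snapshot Citus catalogs into public.* backup tables
SELECT citus_prepare_pg_upgrade();
-- 2. run pg_upgrade with the new PostgreSQL binaries
-- 3. on the new cluster: restore catalogs, sequences, triggers and pg_dist_object
SELECT citus_finish_pg_upgrade();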


@ -128,6 +128,12 @@ BEGIN
INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo;
INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo;
-- Temporarily disable trigger to check for validity of functions while
-- inserting. The current contents of the table might be invalid if one of
-- the functions was removed by the user without also removing the
-- rebalance strategy. Obviously that's not great, but it should be no
-- reason to fail the upgrade.
ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger;
INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT
name,
default_strategy,
@ -138,6 +144,7 @@ BEGIN
minimum_threshold,
improvement_threshold
FROM public.pg_dist_rebalance_strategy;
ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger;
--
-- drop backup tables


@ -0,0 +1,36 @@
-- create a new function, without shardstate
CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata(
shard_id bigint,
shard_length bigint, group_id integer,
placement_id bigint)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$;
COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS
'Inserts into pg_dist_shard_placement with user checks';
-- create a new function, without shardstate
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata(
shard_id bigint,
shard_length bigint, group_id integer,
placement_id bigint)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, bigint, integer, bigint) IS
'Inserts into pg_dist_shard_placement with user checks';
-- replace the old one so it would call the old C function with shard_state
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata(
shard_id bigint, shard_state integer,
shard_length bigint, group_id integer,
placement_id bigint)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata_legacy$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, integer, bigint, integer, bigint) IS
'Inserts into pg_dist_shard_placement with user checks';
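To make the overloading explicit, a sketch of how the three definitions above resolve (IDs are hypothetical; these are internal metadata functions, shown for illustration only):
-- 4-argument form: the new shardstate-free entry point
SELECT citus_internal.add_placement_metadata(102008, 8192, 2, 150);
-- 5-argument form: resolves to the _legacy C symbol that still accepts shard_state
SELECT pg_catalog.citus_internal_add_placement_metadata(102008, 1, 8192, 2, 150);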


@ -1,3 +1,15 @@
-- create a new function, without shardstate
CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata(
shard_id bigint,
shard_length bigint, group_id integer,
placement_id bigint)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$;
COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS
'Inserts into pg_dist_shard_placement with user checks';
-- create a new function, without shardstate
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata(
shard_id bigint,


@ -0,0 +1,21 @@
CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata(
relation_id regclass, shard_id bigint,
storage_type "char", shard_min_value text,
shard_max_value text
)
RETURNS void
LANGUAGE C
AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$;
COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS
'Inserts into pg_dist_shard with user checks';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata(
relation_id regclass, shard_id bigint,
storage_type "char", shard_min_value text,
shard_max_value text
)
RETURNS void
LANGUAGE C
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", text, text) IS
'Inserts into pg_dist_shard with user checks';


@ -1,3 +1,14 @@
CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata(
relation_id regclass, shard_id bigint,
storage_type "char", shard_min_value text,
shard_max_value text
)
RETURNS void
LANGUAGE C
AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$;
COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS
'Inserts into pg_dist_shard with user checks';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata(
relation_id regclass, shard_id bigint,
storage_type "char", shard_min_value text,


@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$;
COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS
'insert given tenant schema into pg_dist_schema with given colocation id';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_add_tenant_schema(Oid, int) IS
'insert given tenant schema into pg_dist_schema with given colocation id';


@ -1,3 +1,12 @@
CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$;
COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS
'insert given tenant schema into pg_dist_schema with given colocation id';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int)
RETURNS void
LANGUAGE C


@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock)
RETURNS void
LANGUAGE C STABLE PARALLEL SAFE STRICT
AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$;
COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock)
IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster';
REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC;
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock)
RETURNS void
LANGUAGE C STABLE PARALLEL SAFE STRICT
AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock)
IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster';
REVOKE ALL ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC;
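For context, a sketch of reading the value this UDF reconciles (assuming the cluster clock feature is in use):
-- the node's current logical clock; remote maxima are fed back through
-- citus_internal.adjust_local_clock_to_remote() during distributed operations
SELECT citus_get_node_clock();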


@ -1,3 +1,12 @@
CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock)
RETURNS void
LANGUAGE C STABLE PARALLEL SAFE STRICT
AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$;
COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock)
IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster';
REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC;
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock)
RETURNS void
LANGUAGE C STABLE PARALLEL SAFE STRICT


@ -1,10 +1,10 @@
--
-- citus_internal_database_command run given database command without transaction block restriction.
-- citus_internal.database_command run given database command without transaction block restriction.
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text)
CREATE OR REPLACE FUNCTION citus_internal.database_command(command text)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_database_command$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS
COMMENT ON FUNCTION citus_internal.database_command(text) IS
'run a database command without transaction block restrictions';
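A usage sketch for the relocated function (internal-only; the database name is hypothetical):
-- CREATE/DROP DATABASE cannot run inside a transaction block, so metadata
-- propagation relays such commands through this UDF
SELECT citus_internal.database_command('CREATE DATABASE tenant_db');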


@ -1,10 +1,10 @@
--
-- citus_internal_database_command run given database command without transaction block restriction.
-- citus_internal.database_command run given database command without transaction block restriction.
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text)
CREATE OR REPLACE FUNCTION citus_internal.database_command(command text)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_database_command$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS
COMMENT ON FUNCTION citus_internal.database_command(text) IS
'run a database command without transaction block restrictions';


@ -0,0 +1,19 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata(
colocation_id int)
RETURNS void
LANGUAGE C
STRICT
AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS
'deletes a co-location group from pg_dist_colocation';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata(
colocation_id int)
RETURNS void
LANGUAGE C
STRICT
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_delete_colocation_metadata(int) IS
'deletes a co-location group from pg_dist_colocation';


@ -1,3 +1,13 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata(
colocation_id int)
RETURNS void
LANGUAGE C
STRICT
AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS
'deletes a co-location group from pg_dist_colocation';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata(
colocation_id int)
RETURNS void


@ -0,0 +1,14 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS
'Deletes a row from pg_dist_partition with table ownership checks';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_delete_partition_metadata(regclass) IS
'Deletes a row from pg_dist_partition with table ownership checks';


@ -1,3 +1,10 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS
'Deletes a row from pg_dist_partition with table ownership checks';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass)
RETURNS void
LANGUAGE C STRICT


@ -0,0 +1,19 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata(
placement_id bigint)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME',
$$citus_internal_delete_placement_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint)
IS 'Delete placement with given id from pg_dist_placement metadata table.';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata(
placement_id bigint)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME',
$$citus_internal_delete_placement_metadata$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_delete_placement_metadata(bigint)
IS 'Delete placement with given id from pg_dist_placement metadata table.';


@ -1,3 +1,13 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata(
placement_id bigint)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME',
$$citus_internal_delete_placement_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint)
IS 'Delete placement with given id from pg_dist_placement metadata table.';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata(
placement_id bigint)
RETURNS void


@ -0,0 +1,14 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS
'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_delete_shard_metadata(bigint) IS
'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks';


@ -1,3 +1,10 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$;
COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS
'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint)
RETURNS void
LANGUAGE C STRICT


@ -0,0 +1,17 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$;
COMMENT ON FUNCTION citus_internal.delete_tenant_schema(Oid) IS
'delete given tenant schema from pg_dist_schema';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_delete_tenant_schema(Oid) IS
'delete given tenant schema from pg_dist_schema';


@ -1,3 +1,12 @@
CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$;
COMMENT ON FUNCTION citus_internal.delete_tenant_schema(Oid) IS
'delete given tenant schema from pg_dist_schema';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid)
RETURNS void
LANGUAGE C


@ -0,0 +1,35 @@
CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$;
COMMENT ON FUNCTION citus_internal.global_blocked_processes()
IS 'returns a global list of blocked backends originating from this node';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes()
IS 'returns a global list of blocked backends originating from this node';
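A sketch of querying the relocated UDF directly (normally consumed through the citus_lock_waits view):
-- cluster-wide wait edges, keyed by global PID
SELECT waiting_global_pid, blocking_global_pid, blocking_transaction_waiting
FROM citus_internal.global_blocked_processes();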


@ -1,3 +1,21 @@
CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$;
COMMENT ON FUNCTION citus_internal.global_blocked_processes()
IS 'returns a global list of blocked backends originating from this node';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,


@ -0,0 +1,35 @@
CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$;
COMMENT ON FUNCTION citus_internal.local_blocked_processes()
IS 'returns all local lock wait chains, that start from any citus backend';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes()
IS 'returns all local lock wait chains, that start from any citus backend';


@ -1,3 +1,21 @@
CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,
OUT waiting_node_id int4,
OUT waiting_transaction_num int8,
OUT waiting_transaction_stamp timestamptz,
OUT blocking_global_pid int8,
OUT blocking_pid int4,
OUT blocking_node_id int4,
OUT blocking_transaction_num int8,
OUT blocking_transaction_stamp timestamptz,
OUT blocking_transaction_waiting bool)
RETURNS SETOF RECORD
LANGUAGE C STRICT
AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$;
COMMENT ON FUNCTION citus_internal.local_blocked_processes()
IS 'returns all local lock wait chains, that start from any citus backend';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes(
OUT waiting_global_pid int8,
OUT waiting_pid int4,


@ -0,0 +1,13 @@
CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int)
RETURNS VOID
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$;
COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int)
IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int)
RETURNS VOID
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$;
COMMENT ON FUNCTION citus_internal_mark_node_not_synced(int, int)
IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.';


@ -1,3 +1,10 @@
CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int)
RETURNS VOID
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$;
COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int)
IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int)
RETURNS VOID
LANGUAGE C STRICT


@ -0,0 +1,15 @@
CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$;
COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS
'Delete a tenant schema and the corresponding colocation group from metadata tables.';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS
'Delete a tenant schema and the corresponding colocation group from metadata tables.';


@ -1,3 +1,11 @@
CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$;
COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS
'Delete a tenant schema and the corresponding colocation group from metadata tables.';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text)
RETURNS void
LANGUAGE C


@ -0,0 +1,23 @@
CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata(
relation_id oid,
replication_model "char",
colocation_id bigint,
auto_converted boolean)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$;
COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean)
IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(
relation_id oid,
replication_model "char",
colocation_id bigint,
auto_converted boolean)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(oid, "char", bigint, boolean)
IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.';


@ -1,3 +1,15 @@
CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata(
relation_id oid,
replication_model "char",
colocation_id bigint,
auto_converted boolean)
RETURNS void
LANGUAGE C
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$;
COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean)
IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(
relation_id oid,
replication_model "char",


@ -0,0 +1,19 @@
CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata(
shard_id bigint, source_group_id integer,
target_group_id integer)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$;
COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS
'Updates into pg_dist_placement with user checks';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata(
shard_id bigint, source_group_id integer,
target_group_id integer)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_update_placement_metadata(bigint, integer, integer) IS
'Updates into pg_dist_placement with user checks';


@ -1,3 +1,13 @@
CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata(
shard_id bigint, source_group_id integer,
target_group_id integer)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$;
COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS
'Updates into pg_dist_placement with user checks';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata(
shard_id bigint, source_group_id integer,
target_group_id integer)


@ -0,0 +1,14 @@
CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$;
COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS
'Updates colocationId field of pg_dist_partition for the relation_id';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME';
COMMENT ON FUNCTION pg_catalog.citus_internal_update_relation_colocation(oid, int) IS
'Updates colocationId field of pg_dist_partition for the relation_id';
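To see what this UDF mutates, a read-only sketch against the catalog it targets:
-- colocationid is the field citus_internal.update_relation_colocation() rewrites
SELECT logicalrelid, colocationid FROM pg_catalog.pg_dist_partition;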


@ -1,3 +1,10 @@
CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int)
RETURNS void
LANGUAGE C STRICT
AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$;
COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS
'Updates colocationId field of pg_dist_partition for the relation_id';
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int)
RETURNS void
LANGUAGE C STRICT

Some files were not shown because too many files have changed in this diff.