Merge branch 'main' into extend-columar_insert-test

pull/7670/head
Karina 2025-08-07 16:56:49 +03:00 committed by GitHub
commit bf34901241
559 changed files with 43466 additions and 23991 deletions

@@ -3,5 +3,5 @@
 \pset border 2
 \setenv PAGER 'pspg --no-mouse -bX --no-commandbar --no-topbar'
 \set HISTSIZE 100000
-\set PROMPT1 '\n%[%033[1m%]%M %n@%/:%>-%p%R%[%033[0m%]%# '
+\set PROMPT1 '\n%[%033[1m%]%M %n@%/:%> (PID: %p)%R%[%033[0m%]%# '
 \set PROMPT2 ' '

@@ -6,9 +6,12 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 # install build tools
 RUN apt update && apt install -y \
+bison \
 bzip2 \
 cpanminus \
 curl \
+docbook-xml \
+docbook-xsl \
 flex \
 gcc \
 git \
@@ -20,6 +23,7 @@ RUN apt update && apt install -y \
 libreadline-dev \
 libselinux1-dev \
 libssl-dev \
+libxml2-utils \
 libxslt-dev \
 libzstd-dev \
 locales \
@@ -32,6 +36,7 @@ RUN apt update && apt install -y \
 sudo \
 uuid-dev \
 valgrind \
+xsltproc \
 zlib1g-dev \
 && add-apt-repository ppa:deadsnakes/ppa -y \
 && apt install -y \
@@ -67,20 +72,8 @@ ENV PATH="/home/citus/.pgenv/pgsql/bin:${PATH}"
 USER citus
 # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions
-FROM base AS pg14
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.12
-RUN rm .pgenv/src/*.tar*
-RUN make -C .pgenv/src/postgresql-*/ clean
-RUN make -C .pgenv/src/postgresql-*/src/include install
-# create a staging directory with all files we want to copy from our pgenv build
-# we will copy the contents of the staged folder into the final image at once
-RUN mkdir .pgenv-staging/
-RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
-RUN rm .pgenv-staging/config/default.conf
 FROM base AS pg15
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.7
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.13
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -92,7 +85,19 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
 RUN rm .pgenv-staging/config/default.conf
 FROM base AS pg16
-RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.3
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.9
+RUN rm .pgenv/src/*.tar*
+RUN make -C .pgenv/src/postgresql-*/ clean
+RUN make -C .pgenv/src/postgresql-*/src/include install
+# create a staging directory with all files we want to copy from our pgenv build
+# we will copy the contents of the staged folder into the final image at once
+RUN mkdir .pgenv-staging/
+RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/
+RUN rm .pgenv-staging/config/default.conf
+FROM base AS pg17
+RUN MAKEFLAGS="-j $(nproc)" pgenv build 17.5
 RUN rm .pgenv/src/*.tar*
 RUN make -C .pgenv/src/postgresql-*/ clean
 RUN make -C .pgenv/src/postgresql-*/src/include install
@@ -193,9 +198,9 @@ RUN git clone https://github.com/so-fancy/diff-so-fancy.git \
 COPY --link --from=uncrustify-builder /uncrustify/usr/ /usr/
-COPY --link --from=pg14 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
 COPY --link --from=pg15 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
 COPY --link --from=pg16 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
+COPY --link --from=pg17 /home/citus/.pgenv-staging/ /home/citus/.pgenv/
 COPY --link --from=pipenv /home/citus/.local/share/virtualenvs/ /home/citus/.local/share/virtualenvs/
@@ -211,7 +216,7 @@ COPY --chown=citus:citus .psqlrc .
 RUN sudo chown --from=root:root citus:citus -R ~
 # sets default pg version
-RUN pgenv switch 16.3
+RUN pgenv switch 17.5
 # make connecting to the coordinator easy
 ENV PGPORT=9700

@@ -1,4 +1,4 @@
-black==23.11.0
+black==24.3.0
 click==8.1.7
 isort==5.12.0
 mypy-extensions==1.0.0

@@ -16,7 +16,7 @@ pytest-timeout = "*"
 pytest-xdist = "*"
 pytest-repeat = "*"
 pyyaml = "*"
-werkzeug = "==2.3.7"
+werkzeug = "==3.0.6"
 [dev-packages]
 black = "*"

Pipfile.lock regenerated: the "_meta" hash changes from
f8db86383082539f626f1402e720f5f2e3f9718b44a8f26110cf9f52e7ca46bc to
bdfddfee81a47cfb42e76936d229e94f5d3cee75f612b7beb2d3008b06d6427b, and the
per-package sha256 hash lists are refreshed to match the new releases. The
version and marker changes are:

default packages:
- certifi 2024.2.2 -> 2024.7.4 (drops "index": "pypi")
- cffi 1.16.0 -> 1.17.1 (markers: "platform_python_implementation != 'PyPy'" -> "python_version >= '3.8'")
- cryptography 42.0.3 -> 44.0.1 (markers now also exclude python_full_version 3.9.0 and 3.9.1)
- jinja2 3.1.3 -> 3.1.6 (drops "index": "pypi")
- markupsafe 2.1.5 -> 3.0.2 (markers: python_version >= '3.7' -> >= '3.9')
- pycparser 2.21 -> 2.22 (adds markers: python_version >= '3.8')
- tornado 6.4 -> 6.5 (adds "index": "pypi"; markers: python_version >= '3.8' -> >= '3.9')
- werkzeug 2.3.7 -> 3.0.6

dev packages:
- black 24.2.0 -> 24.3.0
- click 8.0.4 -> 8.1.8 (markers: python_version >= '3.6' -> >= '3.7')
- mypy-extensions 1.0.0 -> 1.1.0 (markers: python_version >= '3.5' -> >= '3.8')
- packaging 23.2 -> 25.0 (markers: python_version >= '3.7' -> >= '3.8')
- platformdirs 4.2.0 -> 4.3.8 (markers: python_version >= '3.8' -> >= '3.9')
- tomli 2.0.1 -> 2.2.1 (markers: python_version < '3.11' -> >= '3.8')
- typing-extensions 4.9.0 -> 4.14.1 (markers: python_version >= '3.8' -> >= '3.9')

.gitattributes

@@ -25,10 +25,10 @@ configure -whitespace
 # except these exceptions...
 src/backend/distributed/utils/citus_outfuncs.c -citus-style
-src/backend/distributed/deparser/ruleutils_13.c -citus-style
-src/backend/distributed/deparser/ruleutils_14.c -citus-style
 src/backend/distributed/deparser/ruleutils_15.c -citus-style
 src/backend/distributed/deparser/ruleutils_16.c -citus-style
+src/backend/distributed/deparser/ruleutils_17.c -citus-style
+src/backend/distributed/deparser/ruleutils_18.c -citus-style
 src/backend/distributed/commands/index_pg_source.c -citus-style
 src/include/distributed/citus_nodes.h -citus-style

@@ -6,7 +6,7 @@ inputs:
 runs:
 using: composite
 steps:
-- uses: actions/upload-artifact@v3.1.1
+- uses: actions/upload-artifact@v4.6.0
 name: Upload logs
 with:
 name: ${{ inputs.folder }}

@@ -17,7 +17,7 @@ runs:
 echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
 fi
 shell: bash
-- uses: actions/download-artifact@v3.0.1
+- uses: actions/download-artifact@v4.1.8
 with:
 name: build-${{ env.PG_MAJOR }}
 - name: Install Extension

@@ -21,7 +21,7 @@ runs:
 mkdir -p /tmp/codeclimate
 cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
 shell: bash
-- uses: actions/upload-artifact@v3.1.1
+- uses: actions/upload-artifact@v4.6.0
 with:
 path: "/tmp/codeclimate/*.json"
-name: codeclimate
+name: codeclimate-${{ inputs.flags }}
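
Note on the renamed artifact: upload-artifact v4 treats artifacts as immutable, so multiple jobs can no longer keep writing into one shared "codeclimate" artifact the way v3 allowed; the usual workaround is a unique name per job plus a merged download later. A minimal sketch of the v4 upload pattern, assuming an inputs.flags value such as 17_citus_upgrade (the value is illustrative):

- uses: actions/upload-artifact@v4.6.0
  with:
    # v4 artifacts are immutable, so the name must be unique per uploading job
    name: codeclimate-${{ inputs.flags }}
    path: /tmp/codeclimate/*.json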

@@ -31,31 +31,31 @@ jobs:
 pgupgrade_image_name: "ghcr.io/citusdata/pgupgradetester"
 style_checker_image_name: "ghcr.io/citusdata/stylechecker"
 style_checker_tools_version: "0.8.18"
-sql_snapshot_pg_version: "16.3"
-image_suffix: "-v13fd57c"
-pg14_version: '{ "major": "14", "full": "14.12" }'
-pg15_version: '{ "major": "15", "full": "15.7" }'
-pg16_version: '{ "major": "16", "full": "16.3" }'
-upgrade_pg_versions: "14.12-15.7-16.3"
+sql_snapshot_pg_version: "17.5"
+image_suffix: "-vb17c33b"
+pg15_version: '{ "major": "15", "full": "15.13" }'
+pg16_version: '{ "major": "16", "full": "16.9" }'
+pg17_version: '{ "major": "17", "full": "17.5" }'
+upgrade_pg_versions: "15.13-16.9-17.5"
 steps:
-# Since GHA jobs needs at least one step we use a noop step here.
+# Since GHA jobs need at least one step we use a noop step here.
 - name: Set up parameters
 run: echo 'noop'
 check-sql-snapshots:
 needs: params
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 container:
 image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }}
 options: --user root
 steps:
-- uses: actions/checkout@v3.5.0
+- uses: actions/checkout@v4
 - name: Check Snapshots
 run: |
 git config --global --add safe.directory ${GITHUB_WORKSPACE}
 ci/check_sql_snapshots.sh
 check-style:
 needs: params
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 container:
 image: ${{ needs.params.outputs.style_checker_image_name }}:${{ needs.params.outputs.style_checker_tools_version }}${{ needs.params.outputs.image_suffix }}
 steps:
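
Each pgNN_version output above is a small JSON document rather than a plain string, so downstream jobs pull out the field they need with fromJson(). A minimal sketch of a consuming job, assuming the params job shown above (the job name example-consumer is illustrative, not part of this workflow):

example-consumer:
  needs: params
  runs-on: ubuntu-latest
  container:
    # .full selects the full version for the image tag
    image: "${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }}"
  steps:
    # .major selects just the major version from the same JSON output
    - run: echo "PG major is ${{ fromJson(needs.params.outputs.pg17_version).major }}"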
@@ -110,10 +110,10 @@ jobs:
 image_suffix:
 - ${{ needs.params.outputs.image_suffix}}
 pg_version:
-- ${{ needs.params.outputs.pg14_version }}
 - ${{ needs.params.outputs.pg15_version }}
 - ${{ needs.params.outputs.pg16_version }}
-runs-on: ubuntu-20.04
+- ${{ needs.params.outputs.pg17_version }}
+runs-on: ubuntu-latest
 container:
 image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
 options: --user root
@@ -125,7 +125,7 @@ jobs:
 - name: Build
 run: "./ci/build-citus.sh"
 shell: bash
-- uses: actions/upload-artifact@v3.1.1
+- uses: actions/upload-artifact@v4.6.0
 with:
 name: build-${{ env.PG_MAJOR }}
 path: |-
@@ -141,9 +141,9 @@ jobs:
 image_name:
 - ${{ needs.params.outputs.test_image_name }}
 pg_version:
-- ${{ needs.params.outputs.pg14_version }}
 - ${{ needs.params.outputs.pg15_version }}
 - ${{ needs.params.outputs.pg16_version }}
+- ${{ needs.params.outputs.pg17_version }}
 make:
 - check-split
 - check-multi
@@ -161,10 +161,6 @@ jobs:
 - check-enterprise-isolation-logicalrep-2
 - check-enterprise-isolation-logicalrep-3
 include:
-- make: check-failure
-pg_version: ${{ needs.params.outputs.pg14_version }}
-suite: regress
-image_name: ${{ needs.params.outputs.fail_test_image_name }}
 - make: check-failure
 pg_version: ${{ needs.params.outputs.pg15_version }}
 suite: regress
@@ -173,8 +169,8 @@ jobs:
 pg_version: ${{ needs.params.outputs.pg16_version }}
 suite: regress
 image_name: ${{ needs.params.outputs.fail_test_image_name }}
-- make: check-enterprise-failure
-pg_version: ${{ needs.params.outputs.pg14_version }}
+- make: check-failure
+pg_version: ${{ needs.params.outputs.pg17_version }}
 suite: regress
 image_name: ${{ needs.params.outputs.fail_test_image_name }}
 - make: check-enterprise-failure
@@ -185,8 +181,8 @@ jobs:
 pg_version: ${{ needs.params.outputs.pg16_version }}
 suite: regress
 image_name: ${{ needs.params.outputs.fail_test_image_name }}
-- make: check-pytest
-pg_version: ${{ needs.params.outputs.pg14_version }}
+- make: check-enterprise-failure
+pg_version: ${{ needs.params.outputs.pg17_version }}
 suite: regress
 image_name: ${{ needs.params.outputs.fail_test_image_name }}
 - make: check-pytest
@@ -197,6 +193,10 @@ jobs:
 pg_version: ${{ needs.params.outputs.pg16_version }}
 suite: regress
 image_name: ${{ needs.params.outputs.fail_test_image_name }}
+- make: check-pytest
+pg_version: ${{ needs.params.outputs.pg17_version }}
+suite: regress
+image_name: ${{ needs.params.outputs.fail_test_image_name }}
 - make: installcheck
 suite: cdc
 image_name: ${{ needs.params.outputs.test_image_name }}
@@ -205,10 +205,10 @@ jobs:
 suite: cdc
 image_name: ${{ needs.params.outputs.test_image_name }}
 pg_version: ${{ needs.params.outputs.pg16_version }}
-- make: check-query-generator
-pg_version: ${{ needs.params.outputs.pg14_version }}
-suite: regress
-image_name: ${{ needs.params.outputs.fail_test_image_name }}
+- make: installcheck
+suite: cdc
+image_name: ${{ needs.params.outputs.test_image_name }}
+pg_version: ${{ needs.params.outputs.pg17_version }}
 - make: check-query-generator
 pg_version: ${{ needs.params.outputs.pg15_version }}
 suite: regress
@@ -217,7 +217,11 @@ jobs:
 pg_version: ${{ needs.params.outputs.pg16_version }}
 suite: regress
 image_name: ${{ needs.params.outputs.fail_test_image_name }}
-runs-on: ubuntu-20.04
+- make: check-query-generator
+pg_version: ${{ needs.params.outputs.pg17_version }}
+suite: regress
+image_name: ${{ needs.params.outputs.fail_test_image_name }}
+runs-on: ubuntu-latest
 container:
 image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
 options: --user root --dns=8.8.8.8
@@ -257,9 +261,9 @@ jobs:
 image_name:
 - ${{ needs.params.outputs.fail_test_image_name }}
 pg_version:
-- ${{ needs.params.outputs.pg14_version }}
 - ${{ needs.params.outputs.pg15_version }}
 - ${{ needs.params.outputs.pg16_version }}
+- ${{ needs.params.outputs.pg17_version }}
 parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
 steps:
 - uses: actions/checkout@v4
@@ -284,14 +288,16 @@ jobs:
 check-arbitrary-configs parallel=4 CONFIGS=$TESTS
 - uses: "./.github/actions/save_logs_and_results"
 if: always()
+with:
+folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
 - uses: "./.github/actions/upload_coverage"
 if: always()
 with:
-flags: ${{ env.pg_major }}_upgrade
+flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
 codecov_token: ${{ secrets.CODECOV_TOKEN }}
 test-pg-upgrade:
 name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 container:
 image: "${{ needs.params.outputs.pgupgrade_image_name }}:${{ needs.params.outputs.upgrade_pg_versions }}${{ needs.params.outputs.image_suffix }}"
 options: --user root
@@ -302,12 +308,12 @@ jobs:
 fail-fast: false
 matrix:
 include:
-- old_pg_major: 14
-new_pg_major: 15
 - old_pg_major: 15
 new_pg_major: 16
-- old_pg_major: 14
-new_pg_major: 16
+- old_pg_major: 16
+new_pg_major: 17
+- old_pg_major: 15
+new_pg_major: 17
 env:
 old_pg_major: ${{ matrix.old_pg_major }}
 new_pg_major: ${{ matrix.new_pg_major }}
@@ -335,16 +341,18 @@ jobs:
 if: failure()
 - uses: "./.github/actions/save_logs_and_results"
 if: always()
+with:
+folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
 - uses: "./.github/actions/upload_coverage"
 if: always()
 with:
 flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
 codecov_token: ${{ secrets.CODECOV_TOKEN }}
 test-citus-upgrade:
-name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade
-runs-on: ubuntu-20.04
+name: PG${{ fromJson(needs.params.outputs.pg15_version).major }} - check-citus-upgrade
+runs-on: ubuntu-latest
 container:
-image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}"
+image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg15_version).full }}${{ needs.params.outputs.image_suffix }}"
 options: --user root
 needs:
 - params
@@ -380,18 +388,21 @@ jobs:
 done;
 - uses: "./.github/actions/save_logs_and_results"
 if: always()
+with:
+folder: ${{ env.PG_MAJOR }}_citus_upgrade
 - uses: "./.github/actions/upload_coverage"
 if: always()
 with:
-flags: ${{ env.pg_major }}_upgrade
+flags: ${{ env.PG_MAJOR }}_citus_upgrade
 codecov_token: ${{ secrets.CODECOV_TOKEN }}
 upload-coverage:
-if: always()
+# secret below is not available for forks so disabling upload action for them
+if: ${{ github.event.pull_request.head.repo.full_name == github.repository || github.event_name != 'pull_request' }}
 env:
 CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
-runs-on: ubuntu-20.04
+runs-on: ubuntu-latest
 container:
-image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
+image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }}
 needs:
 - params
 - test-citus
@@ -399,10 +410,11 @@ jobs:
 - test-citus-upgrade
 - test-pg-upgrade
 steps:
-- uses: actions/download-artifact@v3.0.1
+- uses: actions/download-artifact@v4.1.8
 with:
-name: "codeclimate"
-path: "codeclimate"
+pattern: codeclimate*
+path: codeclimate
+merge-multiple: true
 - name: Upload coverage results to Code Climate
 run: |-
 cc-test-reporter sum-coverage codeclimate/*.json -o total.json
@ -410,7 +422,7 @@ jobs:
ch_benchmark: ch_benchmark:
name: CH Benchmark name: CH Benchmark
if: startsWith(github.ref, 'refs/heads/ch_benchmark/') if: startsWith(github.ref, 'refs/heads/ch_benchmark/')
runs-on: ubuntu-20.04 runs-on: ubuntu-latest
needs: needs:
- build - build
steps: steps:
@ -428,7 +440,7 @@ jobs:
tpcc_benchmark: tpcc_benchmark:
name: TPCC Benchmark name: TPCC Benchmark
if: startsWith(github.ref, 'refs/heads/tpcc_benchmark/') if: startsWith(github.ref, 'refs/heads/tpcc_benchmark/')
runs-on: ubuntu-20.04 runs-on: ubuntu-latest
needs: needs:
- build - build
steps: steps:
@ -444,10 +456,10 @@ jobs:
chmod +x run_hammerdb.sh chmod +x run_hammerdb.sh
run_hammerdb.sh citusbot_tpcc_benchmark_rg run_hammerdb.sh citusbot_tpcc_benchmark_rg
prepare_parallelization_matrix_32: prepare_parallelization_matrix_32:
name: Parallel 32 name: Prepare parallelization matrix
if: ${{ needs.test-flakyness-pre.outputs.tests != ''}} if: ${{ needs.test-flakyness-pre.outputs.tests != ''}}
needs: test-flakyness-pre needs: test-flakyness-pre
runs-on: ubuntu-20.04 runs-on: ubuntu-latest
outputs: outputs:
json: ${{ steps.parallelization.outputs.json }} json: ${{ steps.parallelization.outputs.json }}
steps: steps:
@ -459,7 +471,7 @@ jobs:
test-flakyness-pre: test-flakyness-pre:
name: Detect regression tests need to be ran name: Detect regression tests need to be ran
if: ${{ !inputs.skip_test_flakyness }}} if: ${{ !inputs.skip_test_flakyness }}}
runs-on: ubuntu-20.04 runs-on: ubuntu-latest
needs: build needs: build
outputs: outputs:
tests: ${{ steps.detect-regression-tests.outputs.tests }} tests: ${{ steps.detect-regression-tests.outputs.tests }}
@ -500,9 +512,9 @@ jobs:
test-flakyness: test-flakyness:
if: ${{ needs.test-flakyness-pre.outputs.tests != ''}} if: ${{ needs.test-flakyness-pre.outputs.tests != ''}}
name: Test flakyness name: Test flakyness
runs-on: ubuntu-20.04 runs-on: ubuntu-latest
container: container:
image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }} image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg17_version).full }}${{ needs.params.outputs.image_suffix }}
options: --user root options: --user root
env: env:
runs: 8 runs: 8
@ -516,6 +528,7 @@ jobs:
matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }} matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: actions/download-artifact@v4.1.8
- uses: "./.github/actions/setup_extension" - uses: "./.github/actions/setup_extension"
- name: Run minimal tests - name: Run minimal tests
run: |- run: |-
@ -529,3 +542,5 @@ jobs:
shell: bash shell: bash
- uses: "./.github/actions/save_logs_and_results" - uses: "./.github/actions/save_logs_and_results"
if: always() if: always()
with:
folder: test_flakyness_parallel_${{ matrix.id }}
View File
@ -24,7 +24,7 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Initialize CodeQL - name: Initialize CodeQL
uses: github/codeql-action/init@v2 uses: github/codeql-action/init@v3
with: with:
languages: ${{ matrix.language }} languages: ${{ matrix.language }}
@ -76,4 +76,4 @@ jobs:
sudo make install-all sudo make install-all
- name: Perform CodeQL Analysis - name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2 uses: github/codeql-action/analyze@v3
View File
@ -16,6 +16,11 @@ on:
jobs: jobs:
docker: docker:
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: read
packages: write
attestations: write
id-token: write
steps: steps:
- -
name: Docker meta name: Docker meta
View File
@ -34,7 +34,7 @@ jobs:
echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
./ci/build-citus.sh ./ci/build-citus.sh
shell: bash shell: bash
- uses: actions/upload-artifact@v3.1.1 - uses: actions/upload-artifact@v4.6.0
with: with:
name: build-${{ env.PG_MAJOR }} name: build-${{ env.PG_MAJOR }}
path: |- path: |-
@ -76,4 +76,4 @@ jobs:
- uses: "./.github/actions/save_logs_and_results" - uses: "./.github/actions/save_logs_and_results"
if: always() if: always()
with: with:
folder: ${{ matrix.id }} folder: check_flakyness_parallel_${{ matrix.id }}
View File
@ -116,7 +116,6 @@ jobs:
# for each deb based image and we use POSTGRES_VERSION to set # for each deb based image and we use POSTGRES_VERSION to set
# PG_CONFIG variable in each of those runs. # PG_CONFIG variable in each of those runs.
packaging_docker_image: packaging_docker_image:
- debian-buster-all
- debian-bookworm-all - debian-bookworm-all
- debian-bullseye-all - debian-bullseye-all
- ubuntu-focal-all - ubuntu-focal-all
@ -130,7 +129,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Set pg_config path and python parameters for deb based distros - name: Set pg_config path and python parameters for deb based distros
run: | run: |
View File
@ -1,3 +1,232 @@
### citus v13.1.0 (May 30th, 2025) ###
* Adds `citus_stat_counters` view that can be used to query
stat counters that Citus collects while the feature is enabled, which is
controlled by citus.enable_stat_counters. `citus_stat_counters()` can be
used to query the stat counters for the provided database oid and
`citus_stat_counters_reset()` can be used to reset them for the provided
database oid or for the current database if nothing or 0 is provided (#7917)
* Adds `citus_nodes` view that displays the node name, port role, and "active"
for nodes in the cluster (#7968)
* Adds `citus_is_primary_node()` UDF to determine if the current node is a
primary node in the cluster (#7720)
* Adds support for propagating `GRANT/REVOKE` rights on table columns (#7918)
* Adds support for propagating `REASSIGN OWNED BY` commands (#7319)
* Adds support for propagating `CREATE`/`DROP` database from all nodes (#7240,
#7253, #7359)
* Propagates `SECURITY LABEL ON ROLE` statement from any node (#7508)
* Adds support for issuing role management commands from worker nodes (#7278)
* Adds support for propagating `ALTER USER RENAME` commands (#7204)
* Adds support for propagating `ALTER DATABASE <db_name> SET ..` commands
(#7181)
* Adds support for propagating `SECURITY LABEL` on tables and columns (#7956)
* Adds support for propagating `COMMENT ON <database>/<role>` commands (#7388)
* Moves some of the internal citus functions from `pg_catalog` to
`citus_internal` schema (#7473, #7470, #7466, #7456, #7450)
* Adjusts `max_prepared_transactions` only when it's set to default on PG >= 16
(#7712)
* Adds skip_qualify_public param to shard_name() UDF to allow qualifying for
"public" schema when needed (#8014)
* Allows `citus_*_size` on indexes on a distributed tables (#7271)
* Allows `GRANT ADMIN` to now also be `INHERIT` or `SET` in support of PG16
* Makes sure `worker_copy_table_to_node` errors out with Citus tables (#7662)
* Adds information to explain output when using
`citus.explain_distributed_queries=false` (#7412)
* Logs username in the failed connection message (#7432)
* Makes sure to avoid incorrectly pushing-down the outer joins between
distributed tables and recurring relations (like reference tables, local
tables and `VALUES(..)` etc.) prior to PG 17 (#7937)
* Prevents incorrectly pushing `nextval()` call down to workers to avoid using
incorrect sequence value for some types of `INSERT .. SELECT`s (#7976)
* Makes sure to prevent `INSERT INTO ... SELECT` queries involving subfield or
sublink, to avoid crashes (#7912)
* Makes sure to take improvement_threshold into account
in `citus_add_rebalance_strategy()` (#7247)
* Makes sure to disallow creating a replicated distributed
table concurrently (#7219)
* Fixes a bug that causes omitting `CASCADE` clause for the commands sent to
workers for `REVOKE` commands on tables (#7958)
* Fixes an issue detected using address sanitizer (#7948, #7949)
* Fixes a bug in deparsing of shard query in case of "output-table column" name
conflict (#7932)
* Fixes a crash in columnar custom scan that happens when a columnar table is
used in a join (#7703)
* Fixes `MERGE` command when insert value does not have source distributed
column (#7627)
* Fixes performance issue when using `\d tablename` on a server with many
tables (#7577)
* Fixes performance issue in `GetForeignKeyOids` on systems with many
constraints (#7580)
* Fixes performance issue when distributing a table that depends on an
extension (#7574)
* Fixes performance issue when creating distributed tables if many already
exist (#7575)
* Fixes a crash caused by some form of `ALTER TABLE ADD COLUMN` statements. When
adding multiple columns, if one of the `ADD COLUMN` statements contains a
`FOREIGN` constraint omitting the referenced
columns in the statement, a `SEGFAULT` occurs (#7522)
* Fixes assertion failure in maintenance daemon during Citus upgrades (#7537)
* Fixes segmentation fault when using `CASE WHEN` in `DO` block functions
(#7554)
* Fixes undefined behavior in `master_disable_node` due to argument mismatch
(#7492)
* Fixes incorrect propagating of `GRANTED BY` and `CASCADE/RESTRICT` clauses
for `REVOKE` statements (#7451)
* Fixes the incorrect column count after `ALTER TABLE` (#7379)
* Fixes timeout when underlying socket is changed for an inter-node connection
(#7377)
* Fixes memory leaks (#7441, #7440)
* Fixes leaking of memory and memory contexts when tracking foreign keys between
Citus tables (#7236)
* Fixes a potential segfault for background rebalancer (#7694)
* Fixes potential `NULL` dereference in casual clocks (#7704)
### citus v13.0.4 (May 29th, 2025) ###
* Fixes an issue detected using address sanitizer (#7966)
* Error out for queries with outer joins and pseudoconstant quals in versions
prior to PG 17 (#7937)
### citus v12.1.8 (May 29, 2025) ###
* Fixes a crash in left outer joins that can happen when there is an
aggregate on a column from the inner side of the join (#7904)
* Fixes an issue detected using address sanitizer (#7965)
* Fixes a crash when executing a prepared CALL, which is not pure SQL but
available with some drivers like npgsql and jpgdbc (#7288)
### citus v13.0.3 (March 20th, 2025) ###
* Fixes a version bump issue in 13.0.2
### citus v13.0.2 (March 12th, 2025) ###
* Fixes a crash in columnar custom scan that happens when a columnar table is
used in a join. (#7647)
* Fixes a bug that breaks `UPDATE SET (...) = (SELECT some_func(),... )`
type of queries on Citus tables (#7914)
* Fixes a planning error caused by a redundant WHERE clause (#7907)
* Fixes a crash in left outer joins that can happen when there is an aggregate
on a column from the inner side of the join. (#7901)
* Fixes deadlock with transaction recovery that is possible during Citus
upgrades. (#7910)
* Fixes a bug that prevents inserting into Citus tables that uses
a GENERATED ALWAYS AS IDENTITY column. (#7920)
* Ensures that a MERGE command on a distributed table with a WHEN NOT MATCHED BY
SOURCE clause runs against all shards of the distributed table. (#7900)
* Fixes a bug that breaks router updates on distributed tables
when a reference table is used in the subquery (#7897)
### citus v12.1.7 (Feb 6, 2025) ###
* Fixes a crash that happens because of unsafe catalog access when re-assigning
the global pid after `application_name` changes (#7791)
* Prevents crashes when another extension skips executing the
`ClientAuthentication_hook` of Citus. (#7836)
### citus v13.0.1 (February 4th, 2025) ###
* Drops support for PostgreSQL 14 (#7753)
### citus v13.0.0 (January 22, 2025) ###
* Adds support for PostgreSQL 17 (#7699, #7661)
* Adds `JSON_TABLE()` support in distributed queries (#7816)
* Propagates `MERGE ... WHEN NOT MATCHED BY SOURCE` (#7807)
* Propagates `MEMORY` and `SERIALIZE` options of `EXPLAIN` (#7802)
* Adds support for identity columns in distributed partitioned tables (#7785)
* Allows specifying an access method for distributed partitioned tables (#7818)
* Allows exclusion constraints on distributed partitioned tables (#7733)
* Allows configuring sslnegotiation using `citus.node_conn_info` (#7821)
* Avoids wal receiver timeouts during large shard splits (#7229)
* Fixes a bug causing incorrect writing of data to target `MERGE` repartition
command (#7659)
* Fixes a crash that happens because of unsafe catalog access when re-assigning
the global pid after `application_name` changes (#7791)
* Fixes incorrect `VALID UNTIL` setting assumption made for roles when syncing
them to new nodes (#7534)
* Fixes segfault when calling distributed procedure with a parameterized
distribution argument (#7242)
* Fixes server crash when trying to execute `activate_node_snapshot()` on a
single-node cluster (#7552)
* Improves `citus_move_shard_placement()` to fail early if there is a new node
without reference tables yet (#7467)
### citus v12.1.6 (Nov 14, 2024) ###
* Propagates `SECURITY LABEL .. ON ROLE` statements (#7304)
* Fixes crash caused by running queries with window partition (#7718)
### citus v12.1.5 (July 17, 2024) ### ### citus v12.1.5 (July 17, 2024) ###
* Adds support for MERGE commands with single shard distributed target tables * Adds support for MERGE commands with single shard distributed target tables
@ -15,9 +244,8 @@
* Allows overwriting host name for all inter-node connections by * Allows overwriting host name for all inter-node connections by
supporting "host" parameter in citus.node_conninfo (#7541) supporting "host" parameter in citus.node_conninfo (#7541)
* Changes the order in which the locks are acquired for the target and * Avoids distributed deadlocks by changing the order in which the locks are
reference tables, when a modify request is initiated from a worker acquired for the target and reference tables (#7542)
node that is not the "FirstWorkerNode" (#7542)
* Fixes a performance issue when distributing a table that depends on an * Fixes a performance issue when distributing a table that depends on an
extension (#7574) extension (#7574)
View File
@ -35,6 +35,28 @@ To get citus installed from source we run `make install -s` in the first termina
With the Citus cluster running you can connect to the coordinator in the first terminal via `psql -p9700`. Because the coordinator is the most common entrypoint the `PGPORT` environment is set accordingly, so a simple `psql` will connect directly to the coordinator. With the Citus cluster running you can connect to the coordinator in the first terminal via `psql -p9700`. Because the coordinator is the most common entrypoint the `PGPORT` environment is set accordingly, so a simple `psql` will connect directly to the coordinator.
### Debugging in VS Code
1. Start Debugging: Press F5 in VS Code to start debugging. When prompted, you'll need to attach the debugger to the appropriate PostgreSQL process.
2. Identify the Process: If you're running a psql command, take note of the PID that appears in your psql prompt. For example:
```
[local] citus@citus:9700 (PID: 5436)=#
```
This PID (5436 in this case) indicates the process that you should attach the debugger to.
If you are uncertain about which process to attach to, you can list all running PostgreSQL processes using the following command:
```
ps aux | grep postgres
```
Look for the process associated with the PID you noted. For example:
```
citus 5436 0.0 0.0 0 0 ? S 14:00 0:00 postgres: citus citus
```
4. Attach the Debugger: Once you've identified the correct PID, select that process when prompted in VS Code to attach the debugger. You should now be able to debug the PostgreSQL session tied to the psql command.
5. Set Breakpoints and Debug: With the debugger attached, you can set breakpoints within the code. This allows you to step through the code execution, inspect variables, and fully debug the PostgreSQL instance running in your container.
### Getting and building ### Getting and building
[PostgreSQL documentation](https://www.postgresql.org/support/versioning/) has a [PostgreSQL documentation](https://www.postgresql.org/support/versioning/) has a
@ -175,43 +197,7 @@ that are missing in earlier minor versions.
### Following our coding conventions ### Following our coding conventions
CI pipeline will automatically reject any PRs which do not follow our coding Our coding conventions are documented in [STYLEGUIDE.md](STYLEGUIDE.md).
conventions. The easiest way to ensure your PR adheres to those conventions is
to use the [citus_indent](https://github.com/citusdata/tools/tree/develop/uncrustify)
tool. This tool uses `uncrustify` under the hood.
```bash
# Uncrustify changes the way it formats code every release a bit. To make sure
# everyone formats consistently we use version 0.68.1:
curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.68.1.tar.gz | tar xz
cd uncrustify-uncrustify-0.68.1/
mkdir build
cd build
cmake ..
make -j5
sudo make install
cd ../..
git clone https://github.com/citusdata/tools.git
cd tools
make uncrustify/.install
```
Once you've done that, you can run the `make reindent` command from the top
directory to recursively check and correct the style of any source files in the
current directory. Under the hood, `make reindent` will run `citus_indent` and
some other style corrections for you.
You can also run the following in the directory of this repository to
automatically format all the files that you have changed before committing:
```bash
cat > .git/hooks/pre-commit << __EOF__
#!/bin/bash
citus_indent --check --diff || { citus_indent --diff; exit 1; }
__EOF__
chmod +x .git/hooks/pre-commit
```
### Making SQL changes ### Making SQL changes
@ -270,3 +256,28 @@ See [`src/test/regress/README.md`](https://github.com/citusdata/citus/blob/maste
User-facing documentation is published on [docs.citusdata.com](https://docs.citusdata.com/). When adding a new feature, function, or setting, you can open a pull request or issue against the [Citus docs repo](https://github.com/citusdata/citus_docs/). User-facing documentation is published on [docs.citusdata.com](https://docs.citusdata.com/). When adding a new feature, function, or setting, you can open a pull request or issue against the [Citus docs repo](https://github.com/citusdata/citus_docs/).
Detailed descriptions of the implementation for Citus developers are provided in the [Citus Technical Documentation](src/backend/distributed/README.md). It is currently a single file for ease of searching. Please update the documentation if you make any changes that affect the design or add major new features. Detailed descriptions of the implementation for Citus developers are provided in the [Citus Technical Documentation](src/backend/distributed/README.md). It is currently a single file for ease of searching. Please update the documentation if you make any changes that affect the design or add major new features.
# Making a pull request ready for reviews
Asking for help and asking for reviews are two different things. When you're asking for help, you're asking for someone to help you with something that you're not expected to know.
But when you're asking for a review, you're asking for someone to review your work and provide feedback. So, when you're asking for a review, you're expected to make sure that:
* Your changes don't perform **unnecessary line addition / deletions / style changes on unrelated files / lines**.
* All CI jobs are **passing**, including **style checks** and **flaky test detection jobs**. Note that if you're an external contributor, you don't have to wait for CI jobs to run (and finish) because they don't get automatically triggered for external contributors.
* Your PR has the necessary amount of **tests** and they're passing.
* You separated as much work as possible into **separate PRs**, e.g., a prerequisite bugfix, a refactoring, etc.
* Your PR doesn't introduce a typo or something that you can easily fix yourself.
* After all CI jobs pass, the code-coverage measurement job (CodeCov as of today) kicks in. That's why it's important to make the **tests pass** first. At that point, you're expected to check the **CodeCov annotations** that can be seen in the **Files Changed** tab and make sure that it doesn't complain about any lines that are not covered. For example, it's ok if CodeCov complains about an `ereport()` call that you put in for an "unexpected-but-better-than-crashing" case, but it's not ok if it complains about an uncovered `if` branch that you added.
* And finally, perform a **self-review** to make sure that:
* Code and code-comments reflect the idea **without requiring an extra explanation** via a chat message / email / PR comment.
This is important because we don't expect developers to reach out to the author / read the whole discussion in the PR to understand the idea behind a commit merged into the `main` branch.
* PR description is clear enough.
* If-and-only-if you're **introducing a user facing change / bugfix**, your PR has a line that starts with `DESCRIPTION: <Present simple tense word that starts with a capital letter, e.g., Adds support for / Fixes / Disallows>`.
* **Commit messages** are clear enough if the commits are doing logically different things.
View File
@ -1,4 +1,4 @@
| **<br/>The Citus database is 100% open source.<br/><img width=1000/><br/>Learn what's new in the [Citus 12.1 release blog](https://www.citusdata.com/blog/2023/09/22/adding-postgres-16-support-to-citus-12-1/) and the [Citus Updates page](https://www.citusdata.com/updates/).<br/><br/>**| | **<br/>The Citus database is 100% open source.<br/><img width=1000/><br/>Learn what's new in the [Citus 13.0 release blog](https://www.citusdata.com/blog/2025/02/06/distribute-postgresql-17-with-citus-13/) and the [Citus Updates page](https://www.citusdata.com/updates/).<br/><br/>**|
|---| |---|
<br/> <br/>
@ -95,14 +95,14 @@ Install packages on Ubuntu / Debian:
```bash ```bash
curl https://install.citusdata.com/community/deb.sh > add-citus-repo.sh curl https://install.citusdata.com/community/deb.sh > add-citus-repo.sh
sudo bash add-citus-repo.sh sudo bash add-citus-repo.sh
sudo apt-get -y install postgresql-16-citus-12.1 sudo apt-get -y install postgresql-17-citus-13.0
``` ```
Install packages on CentOS / Red Hat: Install packages on Red Hat:
```bash ```bash
curl https://install.citusdata.com/community/rpm.sh > add-citus-repo.sh curl https://install.citusdata.com/community/rpm.sh > add-citus-repo.sh
sudo bash add-citus-repo.sh sudo bash add-citus-repo.sh
sudo yum install -y citus121_16 sudo yum install -y citus130_17
``` ```
To add Citus to your local PostgreSQL database, add the following to `postgresql.conf`: To add Citus to your local PostgreSQL database, add the following to `postgresql.conf`:
STYLEGUIDE.md Normal file
View File
@ -0,0 +1,160 @@
# Coding style
The existing code-style in our code-base is not super consistent. There are multiple reasons for that. One big reason is that our code-base is relatively old and our standards have changed over time. The second big reason is that our style-guide is different from the style-guide of Postgres, and some code is copied from Postgres source code and slightly modified. The rules below are for new code. If you're changing existing code that uses a different style, use your best judgement to decide whether to use the rules here or to match the existing style.
## Using citus_indent
CI pipeline will automatically reject any PRs which do not follow our coding
conventions. The easiest way to ensure your PR adheres to those conventions is
to use the [citus_indent](https://github.com/citusdata/tools/tree/develop/uncrustify)
tool. This tool uses `uncrustify` under the hood.
```bash
# Uncrustify changes the way it formats code every release a bit. To make sure
# everyone formats consistently we use version 0.68.1:
curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.68.1.tar.gz | tar xz
cd uncrustify-uncrustify-0.68.1/
mkdir build
cd build
cmake ..
make -j5
sudo make install
cd ../..
git clone https://github.com/citusdata/tools.git
cd tools
make uncrustify/.install
```
Once you've done that, you can run the `make reindent` command from the top
directory to recursively check and correct the style of any source files in the
current directory. Under the hood, `make reindent` will run `citus_indent` and
some other style corrections for you.
You can also run the following in the directory of this repository to
automatically format all the files that you have changed before committing:
```bash
cat > .git/hooks/pre-commit << __EOF__
#!/bin/bash
citus_indent --check --diff || { citus_indent --diff; exit 1; }
__EOF__
chmod +x .git/hooks/pre-commit
```
## Other rules we follow that citus_indent does not enforce
* We almost always use **CamelCase** when naming functions, variables, etc., **not snake_case**.
* We also have the habit of using **lowerCamelCase** for some variables named after their type or their function name, as shown in the examples:
```c
bool IsCitusExtensionLoaded = false;
bool
IsAlterTableRenameStmt(RenameStmt *renameStmt)
{
AlterTableCmd *alterTableCommand = NULL;
..
..
bool isAlterTableRenameStmt = false;
..
}
```
* We **start functions with a comment**:
```c
/*
* MyNiceFunction <something in present simple tense, e.g., processes / returns / checks / takes X as input / does Y> ..
* <some more nice words> ..
* <some more nice words> ..
*/
<static?> <return type>
MyNiceFunction(..)
{
..
..
}
```
* `#include`s need to be sorted based on the ordering below (and then alphabetically within each group), and we should not include what we don't need in a file; see the example after this list:
* System includes (eg. #include<...>)
* Postgres.h (eg. #include "postgres.h")
* Toplevel imports from postgres, not contained in a directory (eg. #include "miscadmin.h")
* General postgres includes (eg . #include "nodes/...")
* Toplevel citus includes, not contained in a directory (eg. #include "citus_verion.h")
* Columnar includes (eg. #include "columnar/...")
* Distributed includes (eg. #include "distributed/...")
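For illustration, a file that follows this ordering might start like below (the specific headers are only examples picked for this sketch):
```c
/* system includes */
#include <limits.h>

/* postgres.h right after the system includes */
#include "postgres.h"

/* toplevel postgres includes */
#include "miscadmin.h"

/* general postgres includes */
#include "nodes/pg_list.h"
#include "utils/builtins.h"

/* toplevel citus includes */
#include "citus_version.h"

/* columnar includes */
#include "columnar/columnar.h"

/* distributed includes */
#include "distributed/listutils.h"
#include "distributed/metadata_cache.h"
```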
* Comments:
```c
/* single line comments start with a lower-case */
/*
* We start multi-line comments with a capital letter
* and keep adding a star to the beginning of each line
* until we close the comment with a star and a slash.
*/
```
* Order of function implementations and their declarations in a file:
We define static functions after the functions that call them. For example:
```c
#include<..>
#include<..>
..
..
typedef struct
{
..
..
} MyNiceStruct;
..
..
PG_FUNCTION_INFO_V1(my_nice_udf1);
PG_FUNCTION_INFO_V1(my_nice_udf2);
..
..
// .. somewhere on top of the file …
static void MyNiceStaticlyDeclaredFunction1(…);
static void MyNiceStaticlyDeclaredFunction2(…);
..
..
void
MyNiceFunctionExternedViaHeaderFile(..)
{
..
..
MyNiceStaticlyDeclaredFunction1(..);
..
..
MyNiceStaticlyDeclaredFunction2(..);
..
}
..
..
// we define this first because it's called by MyNiceFunctionExternedViaHeaderFile()
// before MyNiceStaticlyDeclaredFunction2()
static void
MyNiceStaticlyDeclaredFunction1(…)
{
}
..
..
// then we define this
static void
MyNiceStaticlyDeclaredFunction2(…)
{
}
```
configure vendored
View File
@ -1,6 +1,6 @@
#! /bin/sh #! /bin/sh
# Guess values for system-dependent variables and create Makefiles. # Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for Citus 12.2devel. # Generated by GNU Autoconf 2.69 for Citus 13.2devel.
# #
# #
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package. # Identity of this package.
PACKAGE_NAME='Citus' PACKAGE_NAME='Citus'
PACKAGE_TARNAME='citus' PACKAGE_TARNAME='citus'
PACKAGE_VERSION='12.2devel' PACKAGE_VERSION='13.2devel'
PACKAGE_STRING='Citus 12.2devel' PACKAGE_STRING='Citus 13.2devel'
PACKAGE_BUGREPORT='' PACKAGE_BUGREPORT=''
PACKAGE_URL='' PACKAGE_URL=''
@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing. # Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh. # This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF cat <<_ACEOF
\`configure' configures Citus 12.2devel to adapt to many kinds of systems. \`configure' configures Citus 13.2devel to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]... Usage: $0 [OPTION]... [VAR=VALUE]...
@ -1324,7 +1324,7 @@ fi
if test -n "$ac_init_help"; then if test -n "$ac_init_help"; then
case $ac_init_help in case $ac_init_help in
short | recursive ) echo "Configuration of Citus 12.2devel:";; short | recursive ) echo "Configuration of Citus 13.2devel:";;
esac esac
cat <<\_ACEOF cat <<\_ACEOF
@ -1429,7 +1429,7 @@ fi
test -n "$ac_init_help" && exit $ac_status test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then if $ac_init_version; then
cat <<\_ACEOF cat <<\_ACEOF
Citus configure 12.2devel Citus configure 13.2devel
generated by GNU Autoconf 2.69 generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc. Copyright (C) 2012 Free Software Foundation, Inc.
@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake. running configure, to aid debugging if configure makes a mistake.
It was created by Citus $as_me 12.2devel, which was It was created by Citus $as_me 13.2devel, which was
generated by GNU Autoconf 2.69. Invocation command line was generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@ $ $0 $@
@ -2588,7 +2588,7 @@ fi
if test "$with_pg_version_check" = no; then if test "$with_pg_version_check" = no; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5
$as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;} $as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;}
elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16'; then elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5 as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
else else
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5
@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their # report actual input values of CONFIG_FILES etc. instead of their
# values after options handling. # values after options handling.
ac_log=" ac_log="
This file was extended by Citus $as_me 12.2devel, which was This file was extended by Citus $as_me 13.2devel, which was
generated by GNU Autoconf 2.69. Invocation command line was generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES CONFIG_FILES = $CONFIG_FILES
@ -5455,7 +5455,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\ ac_cs_version="\\
Citus config.status 12.2devel Citus config.status 13.2devel
configured by $0, generated by GNU Autoconf 2.69, configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\" with options \\"\$ac_cs_config\\"
View File
@ -5,7 +5,7 @@
# everyone needing autoconf installed, the resulting files are checked # everyone needing autoconf installed, the resulting files are checked
# into the SCM. # into the SCM.
AC_INIT([Citus], [12.2devel]) AC_INIT([Citus], [13.2devel])
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.]) AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
# we'll need sed and awk for some of the version commands # we'll need sed and awk for some of the version commands
@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check)
if test "$with_pg_version_check" = no; then if test "$with_pg_version_check" = no; then
AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)]) AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)])
elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16'; then elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.]) AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
else else
AC_MSG_NOTICE([building against PostgreSQL $version_num]) AC_MSG_NOTICE([building against PostgreSQL $version_num])
View File
@ -21,6 +21,13 @@
#include "catalog/pg_am.h" #include "catalog/pg_am.h"
#include "catalog/pg_statistic.h" #include "catalog/pg_statistic.h"
#include "commands/defrem.h" #include "commands/defrem.h"
#include "columnar/columnar_version_compat.h"
#if PG_VERSION_NUM >= PG_VERSION_18
#include "commands/explain_format.h"
#endif
#include "executor/executor.h" /* for ExecInitExprWithParams(), ExecEvalExpr() */
#include "nodes/execnodes.h" /* for ExprState, ExprContext, etc. */
#include "nodes/extensible.h" #include "nodes/extensible.h"
#include "nodes/makefuncs.h" #include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h" #include "nodes/nodeFuncs.h"
@ -363,7 +370,7 @@ ColumnarGetRelationInfoHook(PlannerInfo *root, Oid relationObjectId,
/* disable index-only scan */ /* disable index-only scan */
IndexOptInfo *indexOptInfo = NULL; IndexOptInfo *indexOptInfo = NULL;
foreach_ptr(indexOptInfo, rel->indexlist) foreach_declared_ptr(indexOptInfo, rel->indexlist)
{ {
memset(indexOptInfo->canreturn, false, indexOptInfo->ncolumns * sizeof(bool)); memset(indexOptInfo->canreturn, false, indexOptInfo->ncolumns * sizeof(bool));
} }
@ -381,7 +388,7 @@ RemovePathsByPredicate(RelOptInfo *rel, PathPredicate removePathPredicate)
List *filteredPathList = NIL; List *filteredPathList = NIL;
Path *path = NULL; Path *path = NULL;
foreach_ptr(path, rel->pathlist) foreach_declared_ptr(path, rel->pathlist)
{ {
if (!removePathPredicate(path)) if (!removePathPredicate(path))
{ {
@ -428,7 +435,7 @@ static void
CostColumnarPaths(PlannerInfo *root, RelOptInfo *rel, Oid relationId) CostColumnarPaths(PlannerInfo *root, RelOptInfo *rel, Oid relationId)
{ {
Path *path = NULL; Path *path = NULL;
foreach_ptr(path, rel->pathlist) foreach_declared_ptr(path, rel->pathlist)
{ {
if (IsA(path, IndexPath)) if (IsA(path, IndexPath))
{ {
@ -783,7 +790,7 @@ ExtractPushdownClause(PlannerInfo *root, RelOptInfo *rel, Node *node)
List *pushdownableArgs = NIL; List *pushdownableArgs = NIL;
Node *boolExprArg = NULL; Node *boolExprArg = NULL;
foreach_ptr(boolExprArg, boolExpr->args) foreach_declared_ptr(boolExprArg, boolExpr->args)
{ {
Expr *pushdownableArg = ExtractPushdownClause(root, rel, Expr *pushdownableArg = ExtractPushdownClause(root, rel,
(Node *) boolExprArg); (Node *) boolExprArg);
@ -1051,6 +1058,15 @@ FindCandidateRelids(PlannerInfo *root, RelOptInfo *rel, List *joinClauses)
candidateRelids = bms_del_members(candidateRelids, rel->relids); candidateRelids = bms_del_members(candidateRelids, rel->relids);
candidateRelids = bms_del_members(candidateRelids, rel->lateral_relids); candidateRelids = bms_del_members(candidateRelids, rel->lateral_relids);
/*
* For the relevant PG16 commit requiring this addition:
* postgres/postgres@2489d76
*/
#if PG_VERSION_NUM >= PG_VERSION_16
candidateRelids = bms_del_members(candidateRelids, root->outer_join_rels);
#endif
return candidateRelids; return candidateRelids;
} }
@ -1312,11 +1328,8 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
cpath->methods = &ColumnarScanPathMethods; cpath->methods = &ColumnarScanPathMethods;
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* necessary to avoid extra Result node in PG15 */ /* necessary to avoid extra Result node in PG15 */
cpath->flags = CUSTOMPATH_SUPPORT_PROJECTION; cpath->flags = CUSTOMPATH_SUPPORT_PROJECTION;
#endif
/* /*
* populate generic path information * populate generic path information
@ -1550,7 +1563,7 @@ ColumnarPerStripeScanCost(RelOptInfo *rel, Oid relationId, int numberOfColumnsRe
uint32 maxColumnCount = 0; uint32 maxColumnCount = 0;
uint64 totalStripeSize = 0; uint64 totalStripeSize = 0;
StripeMetadata *stripeMetadata = NULL; StripeMetadata *stripeMetadata = NULL;
foreach_ptr(stripeMetadata, stripeList) foreach_declared_ptr(stripeMetadata, stripeList)
{ {
totalStripeSize += stripeMetadata->dataLength; totalStripeSize += stripeMetadata->dataLength;
maxColumnCount = Max(maxColumnCount, stripeMetadata->columnCount); maxColumnCount = Max(maxColumnCount, stripeMetadata->columnCount);
@ -1924,11 +1937,6 @@ ColumnarScan_EndCustomScan(CustomScanState *node)
*/ */
TableScanDesc scanDesc = node->ss.ss_currentScanDesc; TableScanDesc scanDesc = node->ss.ss_currentScanDesc;
/*
* Free the exprcontext
*/
ExecFreeExprContext(&node->ss.ps);
/* /*
* clean out the tuple table * clean out the tuple table
*/ */
View File
@ -645,10 +645,10 @@ SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
{ {
values[Anum_columnar_chunk_minimum_value - 1] = values[Anum_columnar_chunk_minimum_value - 1] =
PointerGetDatum(DatumToBytea(chunk->minimumValue, PointerGetDatum(DatumToBytea(chunk->minimumValue,
&tupleDescriptor->attrs[columnIndex])); Attr(tupleDescriptor, columnIndex)));
values[Anum_columnar_chunk_maximum_value - 1] = values[Anum_columnar_chunk_maximum_value - 1] =
PointerGetDatum(DatumToBytea(chunk->maximumValue, PointerGetDatum(DatumToBytea(chunk->maximumValue,
&tupleDescriptor->attrs[columnIndex])); Attr(tupleDescriptor, columnIndex)));
} }
else else
{ {
@ -803,9 +803,9 @@ ReadStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
datumArray[Anum_columnar_chunk_maximum_value - 1]); datumArray[Anum_columnar_chunk_maximum_value - 1]);
chunk->minimumValue = chunk->minimumValue =
ByteaToDatum(minValue, &tupleDescriptor->attrs[columnIndex]); ByteaToDatum(minValue, Attr(tupleDescriptor, columnIndex));
chunk->maximumValue = chunk->maximumValue =
ByteaToDatum(maxValue, &tupleDescriptor->attrs[columnIndex]); ByteaToDatum(maxValue, Attr(tupleDescriptor, columnIndex));
chunk->hasMinMax = true; chunk->hasMinMax = true;
} }
@ -1391,7 +1391,17 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, bool *update,
Oid columnarStripesOid = ColumnarStripeRelationId(); Oid columnarStripesOid = ColumnarStripeRelationId();
Relation columnarStripes = table_open(columnarStripesOid, AccessShareLock); #if PG_VERSION_NUM >= 180000
/* CatalogTupleUpdate performs a normal heap UPDATE → RowExclusiveLock */
const LOCKMODE openLockMode = RowExclusiveLock;
#else
/* Inplace update never changed tuple length → AccessShareLock was enough */
const LOCKMODE openLockMode = AccessShareLock;
#endif
Relation columnarStripes = table_open(columnarStripesOid, openLockMode);
Oid indexId = ColumnarStripePKeyIndexRelationId(); Oid indexId = ColumnarStripePKeyIndexRelationId();
bool indexOk = OidIsValid(indexId); bool indexOk = OidIsValid(indexId);
@ -1414,15 +1424,20 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, bool *update,
storageId, stripeId))); storageId, stripeId)));
} }
bool newNulls[Natts_columnar_stripe] = { false };
TupleDesc tupleDescriptor = RelationGetDescr(columnarStripes);
HeapTuple modifiedTuple = heap_modify_tuple(oldTuple,
tupleDescriptor,
newValues,
newNulls,
update);
#if PG_VERSION_NUM < PG_VERSION_18
/* /*
* heap_inplace_update already doesn't allow changing size of the original * heap_inplace_update already doesn't allow changing size of the original
* tuple, so we don't allow setting any Datum's to NULL values. * tuple, so we don't allow setting any Datum's to NULL values.
*/ */
bool newNulls[Natts_columnar_stripe] = { false };
TupleDesc tupleDescriptor = RelationGetDescr(columnarStripes);
HeapTuple modifiedTuple = heap_modify_tuple(oldTuple, tupleDescriptor,
newValues, newNulls, update);
heap_inplace_update(columnarStripes, modifiedTuple); heap_inplace_update(columnarStripes, modifiedTuple);
/* /*
@ -1430,18 +1445,24 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, bool *update,
* heap_inplace_update(). * heap_inplace_update().
*/ */
HeapTuple newTuple = oldTuple; HeapTuple newTuple = oldTuple;
#else
/* Regular catalog UPDATE keeps indexes in sync */
CatalogTupleUpdate(columnarStripes, &oldTuple->t_self, modifiedTuple);
HeapTuple newTuple = modifiedTuple;
#endif
CommandCounterIncrement();
/* /*
* Must not pass modifiedTuple, because BuildStripeMetadata expects a real * Must not pass modifiedTuple, because BuildStripeMetadata expects a real
* heap tuple with MVCC fields. * heap tuple with MVCC fields.
*/ */
StripeMetadata *modifiedStripeMetadata = BuildStripeMetadata(columnarStripes, StripeMetadata *modifiedStripeMetadata =
newTuple); BuildStripeMetadata(columnarStripes, newTuple);
CommandCounterIncrement();
systable_endscan(scanDescriptor); systable_endscan(scanDescriptor);
table_close(columnarStripes, AccessShareLock); table_close(columnarStripes, openLockMode);
/* return StripeMetadata object built from modified tuple */ /* return StripeMetadata object built from modified tuple */
return modifiedStripeMetadata; return modifiedStripeMetadata;
@ -1685,7 +1706,7 @@ DeleteTupleAndEnforceConstraints(ModifyState *state, HeapTuple heapTuple)
simple_heap_delete(state->rel, tid); simple_heap_delete(state->rel, tid);
/* execute AFTER ROW DELETE Triggers to enforce constraints */ /* execute AFTER ROW DELETE Triggers to enforce constraints */
ExecARDeleteTriggers_compat(estate, resultRelInfo, tid, NULL, NULL, false); ExecARDeleteTriggers(estate, resultRelInfo, tid, NULL, NULL, false);
} }
@ -1727,12 +1748,37 @@ create_estate_for_relation(Relation rel)
rte->relkind = rel->rd_rel->relkind; rte->relkind = rel->rd_rel->relkind;
rte->rellockmode = AccessShareLock; rte->rellockmode = AccessShareLock;
/* Prepare permission info on PG 16+ */
#if PG_VERSION_NUM >= PG_VERSION_16 #if PG_VERSION_NUM >= PG_VERSION_16
List *perminfos = NIL; List *perminfos = NIL;
addRTEPermissionInfo(&perminfos, rte); addRTEPermissionInfo(&perminfos, rte);
ExecInitRangeTable(estate, list_make1(rte), perminfos); #endif
/* Initialize the range table, with the right signature for each PG version */
#if PG_VERSION_NUM >= PG_VERSION_18
/* PG 18+ needs four arguments (unpruned_relids) */
ExecInitRangeTable(
estate,
list_make1(rte),
perminfos,
NULL /* unpruned_relids: not used by columnar */
);
#elif PG_VERSION_NUM >= PG_VERSION_16
/* PG 16-17: three-arg signature (permInfos) */
ExecInitRangeTable(
estate,
list_make1(rte),
perminfos
);
#else #else
ExecInitRangeTable(estate, list_make1(rte));
/* PG 15: two-arg signature */
ExecInitRangeTable(
estate,
list_make1(rte)
);
#endif #endif
estate->es_output_cid = GetCurrentCommandId(true); estate->es_output_cid = GetCurrentCommandId(true);
@ -2041,7 +2087,7 @@ GetHighestUsedRowNumber(uint64 storageId)
List *stripeMetadataList = ReadDataFileStripeList(storageId, List *stripeMetadataList = ReadDataFileStripeList(storageId,
GetTransactionSnapshot()); GetTransactionSnapshot());
StripeMetadata *stripeMetadata = NULL; StripeMetadata *stripeMetadata = NULL;
foreach_ptr(stripeMetadata, stripeMetadataList) foreach_declared_ptr(stripeMetadata, stripeMetadataList)
{ {
highestRowNumber = Max(highestRowNumber, highestRowNumber = Max(highestRowNumber,
StripeGetHighestRowNumber(stripeMetadata)); StripeGetHighestRowNumber(stripeMetadata));
View File
@ -880,7 +880,7 @@ ReadChunkGroupNextRow(ChunkGroupReadState *chunkGroupReadState, Datum *columnVal
memset(columnNulls, true, sizeof(bool) * chunkGroupReadState->columnCount); memset(columnNulls, true, sizeof(bool) * chunkGroupReadState->columnCount);
int attno; int attno;
foreach_int(attno, chunkGroupReadState->projectedColumnList) foreach_declared_int(attno, chunkGroupReadState->projectedColumnList)
{ {
const ChunkData *chunkGroupData = chunkGroupReadState->chunkGroupData; const ChunkData *chunkGroupData = chunkGroupReadState->chunkGroupData;
const int rowIndex = chunkGroupReadState->currentRow; const int rowIndex = chunkGroupReadState->currentRow;
@ -1489,7 +1489,7 @@ ProjectedColumnMask(uint32 columnCount, List *projectedColumnList)
bool *projectedColumnMask = palloc0(columnCount * sizeof(bool)); bool *projectedColumnMask = palloc0(columnCount * sizeof(bool));
int attno; int attno;
foreach_int(attno, projectedColumnList) foreach_declared_int(attno, projectedColumnList)
{ {
/* attno is 1-indexed; projectedColumnMask is 0-indexed */ /* attno is 1-indexed; projectedColumnMask is 0-indexed */
int columnIndex = attno - 1; int columnIndex = attno - 1;
View File
@ -877,7 +877,7 @@ columnar_relation_set_new_filelocator(Relation rel,
*freezeXid = RecentXmin; *freezeXid = RecentXmin;
*minmulti = GetOldestMultiXactId(); *minmulti = GetOldestMultiXactId();
SMgrRelation srel = RelationCreateStorage_compat(*newrlocator, persistence, true); SMgrRelation srel = RelationCreateStorage(*newrlocator, persistence, true);
ColumnarStorageInit(srel, ColumnarMetadataNewStorageId()); ColumnarStorageInit(srel, ColumnarMetadataNewStorageId());
InitColumnarOptions(rel->rd_id); InitColumnarOptions(rel->rd_id);
@ -1012,7 +1012,7 @@ NeededColumnsList(TupleDesc tupdesc, Bitmapset *attr_needed)
for (int i = 0; i < tupdesc->natts; i++) for (int i = 0; i < tupdesc->natts; i++)
{ {
if (tupdesc->attrs[i].attisdropped) if (Attr(tupdesc, i)->attisdropped)
{ {
continue; continue;
} }
@ -1121,10 +1121,27 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
bool frozenxid_updated; bool frozenxid_updated;
bool minmulti_updated; bool minmulti_updated;
/* for PG 18+, vac_update_relstats gained a new “all_frozen” param */
#if PG_VERSION_NUM >= PG_VERSION_18
/* all frozen pages are always 0, because columnar stripes never store XIDs */
BlockNumber new_rel_allfrozen = 0;
vac_update_relstats(rel, new_rel_pages, new_live_tuples,
new_rel_allvisible, /* allvisible */
new_rel_allfrozen, /* all_frozen */
nindexes > 0,
newRelFrozenXid, newRelminMxid,
&frozenxid_updated, &minmulti_updated,
false);
#else
vac_update_relstats(rel, new_rel_pages, new_live_tuples, vac_update_relstats(rel, new_rel_pages, new_live_tuples,
new_rel_allvisible, nindexes > 0, new_rel_allvisible, nindexes > 0,
newRelFrozenXid, newRelminMxid, newRelFrozenXid, newRelminMxid,
&frozenxid_updated, &minmulti_updated, false); &frozenxid_updated, &minmulti_updated,
false);
#endif
#else #else
TransactionId oldestXmin; TransactionId oldestXmin;
TransactionId freezeLimit; TransactionId freezeLimit;
@ -1187,10 +1204,19 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
#endif #endif
#endif #endif
#if PG_VERSION_NUM >= PG_VERSION_18
pgstat_report_vacuum(RelationGetRelid(rel),
rel->rd_rel->relisshared,
Max(new_live_tuples, 0), /* live tuples */
0, /* dead tuples */
GetCurrentTimestamp()); /* start time */
#else
pgstat_report_vacuum(RelationGetRelid(rel), pgstat_report_vacuum(RelationGetRelid(rel),
rel->rd_rel->relisshared, rel->rd_rel->relisshared,
Max(new_live_tuples, 0), Max(new_live_tuples, 0),
0); 0);
#endif
pgstat_progress_end_command(); pgstat_progress_end_command();
} }
@ -1225,7 +1251,7 @@ LogRelationStats(Relation rel, int elevel)
GetTransactionSnapshot()); GetTransactionSnapshot());
for (uint32 column = 0; column < skiplist->columnCount; column++) for (uint32 column = 0; column < skiplist->columnCount; column++)
{ {
bool attrDropped = tupdesc->attrs[column].attisdropped; bool attrDropped = Attr(tupdesc, column)->attisdropped;
for (uint32 chunk = 0; chunk < skiplist->chunkCount; chunk++) for (uint32 chunk = 0; chunk < skiplist->chunkCount; chunk++)
{ {
ColumnChunkSkipNode *skipnode = ColumnChunkSkipNode *skipnode =
@ -1424,15 +1450,32 @@ ConditionalLockRelationWithTimeout(Relation rel, LOCKMODE lockMode, int timeout,
static bool static bool
columnar_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno, columnar_scan_analyze_next_block(TableScanDesc scan,
#if PG_VERSION_NUM >= PG_VERSION_17
ReadStream *stream)
#else
BlockNumber blockno,
BufferAccessStrategy bstrategy) BufferAccessStrategy bstrategy)
#endif
{ {
/* /*
* Our access method is not pages based, i.e. tuples are not confined * Our access method is not pages based, i.e. tuples are not confined
* to pages boundaries. So not much to do here. We return true anyway * to pages boundaries. So not much to do here. We return true anyway
* so acquire_sample_rows() in analyze.c would call our * so acquire_sample_rows() in analyze.c would call our
* columnar_scan_analyze_next_tuple() callback. * columnar_scan_analyze_next_tuple() callback.
* In PG17, we return false in case there is no buffer left, since
* the outer loop changed in acquire_sample_rows(), and it is
* expected for the scan_analyze_next_block function to check whether
* there are any blocks left in the block sampler.
*/ */
#if PG_VERSION_NUM >= PG_VERSION_17
Buffer buf = read_stream_next_buffer(stream, NULL);
if (!BufferIsValid(buf))
{
return false;
}
ReleaseBuffer(buf);
#endif
return true; return true;
} }
@ -2228,7 +2271,6 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
columnarRangeVar = alterTableStmt->relation; columnarRangeVar = alterTableStmt->relation;
} }
} }
#if PG_VERSION_NUM >= PG_VERSION_15
else if (alterTableCmd->subtype == AT_SetAccessMethod) else if (alterTableCmd->subtype == AT_SetAccessMethod)
{ {
if (columnarRangeVar || *columnarOptions) if (columnarRangeVar || *columnarOptions)
@ -2239,14 +2281,15 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
"Specify SET ACCESS METHOD before storage parameters, or use separate ALTER TABLE commands."))); "Specify SET ACCESS METHOD before storage parameters, or use separate ALTER TABLE commands.")));
} }
destIsColumnar = (strcmp(alterTableCmd->name, COLUMNAR_AM_NAME) == 0); destIsColumnar = (strcmp(alterTableCmd->name ? alterTableCmd->name :
default_table_access_method,
COLUMNAR_AM_NAME) == 0);
if (srcIsColumnar && !destIsColumnar) if (srcIsColumnar && !destIsColumnar)
{ {
DeleteColumnarTableOptions(RelationGetRelid(rel), true); DeleteColumnarTableOptions(RelationGetRelid(rel), true);
} }
} }
#endif /* PG_VERSION_15 */
} }
relation_close(rel, NoLock); relation_close(rel, NoLock);
@ -2547,8 +2590,13 @@ static const TableAmRoutine columnar_am_methods = {
.relation_estimate_size = columnar_estimate_rel_size, .relation_estimate_size = columnar_estimate_rel_size,
#if PG_VERSION_NUM < PG_VERSION_18
/* these two fields were removed in PG18 */
.scan_bitmap_next_block = NULL, .scan_bitmap_next_block = NULL,
.scan_bitmap_next_tuple = NULL, .scan_bitmap_next_tuple = NULL,
#endif
.scan_sample_next_block = columnar_scan_sample_next_block, .scan_sample_next_block = columnar_scan_sample_next_block,
.scan_sample_next_tuple = columnar_scan_sample_next_tuple .scan_sample_next_tuple = columnar_scan_sample_next_tuple
}; };
@ -2586,7 +2634,7 @@ detoast_values(TupleDesc tupleDesc, Datum *orig_values, bool *isnull)
for (int i = 0; i < tupleDesc->natts; i++) for (int i = 0; i < tupleDesc->natts; i++)
{ {
if (!isnull[i] && tupleDesc->attrs[i].attlen == -1 && if (!isnull[i] && Attr(tupleDesc, i)->attlen == -1 &&
VARATT_IS_EXTENDED(values[i])) VARATT_IS_EXTENDED(values[i]))
{ {
/* make a copy */ /* make a copy */
@ -2630,21 +2678,12 @@ ColumnarCheckLogicalReplication(Relation rel)
return; return;
} }
#if PG_VERSION_NUM >= PG_VERSION_15
{ {
PublicationDesc pubdesc; PublicationDesc pubdesc;
RelationBuildPublicationDesc(rel, &pubdesc); RelationBuildPublicationDesc(rel, &pubdesc);
pubActionInsert = pubdesc.pubactions.pubinsert; pubActionInsert = pubdesc.pubactions.pubinsert;
} }
#else
if (rel->rd_pubactions == NULL)
{
GetRelationPublicationActions(rel);
Assert(rel->rd_pubactions != NULL);
}
pubActionInsert = rel->rd_pubactions->pubinsert;
#endif
if (pubActionInsert) if (pubActionInsert)
{ {
@ -3021,6 +3060,8 @@ AvailableExtensionVersionColumnar(void)
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("citus extension is not found"))); errmsg("citus extension is not found")));
return NULL; /* keep compiler happy */
} }
@ -3083,7 +3124,7 @@ DefElem *
GetExtensionOption(List *extensionOptions, const char *defname) GetExtensionOption(List *extensionOptions, const char *defname)
{ {
DefElem *defElement = NULL; DefElem *defElement = NULL;
foreach_ptr(defElement, extensionOptions) foreach_declared_ptr(defElement, extensionOptions)
{ {
if (IsA(defElement, DefElem) && if (IsA(defElement, DefElem) &&
strncmp(defElement->defname, defname, NAMEDATALEN) == 0) strncmp(defElement->defname, defname, NAMEDATALEN) == 0)
View File
@ -18,7 +18,7 @@ generated_downgrade_sql_files += $(patsubst %,$(citus_abs_srcdir)/build/sql/%,$(
DATA_built = $(generated_sql_files) DATA_built = $(generated_sql_files)
# directories with source files # directories with source files
SUBDIRS = . commands connection ddl deparser executor metadata operations planner progress relay safeclib shardsplit test transaction utils worker clock SUBDIRS = . commands connection ddl deparser executor metadata operations planner progress relay safeclib shardsplit stats test transaction utils worker clock
# enterprise modules # enterprise modules
SUBDIRS += replication SUBDIRS += replication
View File
@ -57,6 +57,8 @@ The purpose of this document is to provide comprehensive technical documentation
- [Query from any node](#query-from-any-node) - [Query from any node](#query-from-any-node)
- [Why didnt we have dedicated Query Nodes and Data Nodes?](#why-didnt-we-have-dedicated-query-nodes-and-data-nodes) - [Why didnt we have dedicated Query Nodes and Data Nodes?](#why-didnt-we-have-dedicated-query-nodes-and-data-nodes)
- [Shard visibility](#shard-visibility) - [Shard visibility](#shard-visibility)
- [Statistic tracking](#statistic-tracking)
- [Citus stat counters](#citus-stat-counters)
# Citus Concepts # Citus Concepts
@ -2702,3 +2704,43 @@ Shards can be revealed via two settings:
- `citus.override_shard_visibility = off` disables shard hiding entirely - `citus.override_shard_visibility = off` disables shard hiding entirely
- `citus.show_shards_for_app_name_prefixes`= 'pgAdmin,psql'` disables shard hiding only for specific application_name values, by prefix - `citus.show_shards_for_app_name_prefixes`= 'pgAdmin,psql'` disables shard hiding only for specific application_name values, by prefix
## Statistic tracking
Statistic views defined by Postgres already work well for one Citus node, like `pg_stat_database`, `pg_stat_activity`, `pg_stat_statements`, etc. And for some of them, we even provide wrapper views in Citus to have a global (i.e., cluster-wide) view of the statistics, like `citus_stat_activity`.
Besides these, Citus itself also provides some additional statistic views to track Citus-specific activities. Note that the way we collect statistics for each is quite different.
- `citus_stat_tenants` (needs documentation)
- `citus_stat_statements` (needs documentation)
- `citus_stat_counters`
### Citus stat counters
Citus keeps track of several stat counters and exposes them via the `citus_stat_counters` view. The counters are tracked once `citus.enable_stat_counters` is set to true. Also, `citus_stat_counters_reset()` can be used to reset the counters for a given database if a database oid other than 0 (the default, InvalidOid) is provided; otherwise, it resets the counters for the current database.
Details about the implementation and its caveats can be found in the header comment of [stat_counters.c](/src/backend/distributed/stats/stat_counters.c). However, at the high level;
1. We allocate a shared memory array of length `MaxBackends` so that each backend has its own counter slot, which reduces contention while incrementing the counters at runtime.
2. We also allocate a shared hash table whose entries correspond to individual databases. When a backend exits, it first aggregates its counters into the relevant entry in the shared hash and then resets its own counters, because the same counter slot might later be reused by another backend.
Note that today we use the regular shared hash table API (`ShmemInitHash()`) for this, but we should consider switching to `dshash_table()` once using many databases with Citus becomes practically feasible, because the performance of the regular shared hash table API is expected to degrade once the number of entries grows much larger than the `max_size` parameter provided when the hash table is created.
3. When `citus_stat_counters` is queried, we first aggregate the counters from the shared memory array and then add them to the counters accumulated so far in the relevant shared hash entry for the database.
This means that if we didn't aggregate the counters into the shared hash on exit, the counters seen in `citus_stat_counters` could drift backwards in time.
Note that `citus_stat_counters` might observe the counters of a backend twice, or miss them, if that backend is exiting concurrently. The next call to `citus_stat_counters` will see the correct values as long as the same situation doesn't happen again with another concurrently exiting backend, so we can live with that for now. If we ever find this too undesirable, we could make `citus_stat_counters` queries block against saving the counters into the shared hash entry, but that would also mean exiting backends have to wait for any active `citus_stat_counters` queries to finish, so this needs to be considered carefully.
4. Finally, when `citus_stat_counters_reset()` is called, we reset the shared hash entry for the relevant database and also reset the relevant slots in the shared memory array for that database.
Note that there is a chance that `citus_stat_counters_reset()` partially fails to reset the counters of a backend slot under some rare circumstances, but this should be very rare and we accept it for the sake of lock-free counter increments.
5. Today, neither `citus_stat_counters` nor `citus_stat_counters_reset()` explicitly excludes backend slots that belong to exited backends. Instead, they consider any "not unused" backend slot whose `PGPROC` points to a valid database oid, which doesn't guarantee that the slot is actively used. In practice this is not a problem for either operation, for the reasons mentioned in the relevant functions. If we ever decide that this slows these operations down, we can consider explicitly excluding exited backend slots by checking the `activeBackend` field of the (Citus) `BackendData` for each slot.
6. As of today, we don't persist stat counters across server shutdown. Although it seems quite straightforward to do so, we skipped it for v1. Once we decide to persist the counters, we can look at the equivalent functions we have for `citus_stat_statements`, namely `CitusQueryStatsShmemShutdown()` and `CitusQueryStatsShmemStartup()`. Since quite some time has passed since those two functions were written, we should also check `pgstat_write_statsfile()` and `pgstat_read_statsfile()` in Postgres to make sure we're not missing anything (it seems we are missing a few things).
7. Note that today we don't evict entries of this hash table that point to dropped databases, because the wrapper view filters them out anyway (thanks to the LEFT JOIN) and we don't expect a performance hit when reading from or writing to the hash table because of that, unless users drop and recreate a lot of databases frequently. If this ever becomes a problem, we can let the Citus maintenance daemon evict such entries periodically.
The reason we don't simply use a shared hash table for the counters is that doing a hash lookup for every increment could be more expensive. Even more importantly, sharing a single counter slot across all backends connected to the same database could lead to contention, since that would require taking a lock or contended use of atomic integers. In today's implementation, incrementing a counter doesn't require acquiring any locks.
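To make these building blocks concrete, here is a minimal, hypothetical C sketch of the lock-free increment path and the exit-time flush into the per-database hash entry. All names in it (`BackendStatCounters`, `SavedStatCounters`, `IncrementMyStatCounter()`, `FlushMyStatCountersOnExit()`, `MyCounterSlot`, the counter ids) are illustrative and do not necessarily match the actual symbols in `stat_counters.c`; locking around the shared hash is omitted for brevity.

```c
#include "postgres.h"

#include "miscadmin.h"      /* MyDatabaseId, MaxBackends */
#include "port/atomics.h"
#include "utils/hsearch.h"

/* illustrative counter ids */
typedef enum CitusStatCounterId
{
	STAT_QUERY_EXECUTION,
	STAT_CONNECTION_ESTABLISHMENT,
	N_CITUS_STAT_COUNTERS
} CitusStatCounterId;

/* one slot per backend, in a shared array of length MaxBackends */
typedef struct BackendStatCounters
{
	pg_atomic_uint64 counters[N_CITUS_STAT_COUNTERS];
} BackendStatCounters;

/* per-database entry of the shared hash, keyed by database oid */
typedef struct SavedStatCounters
{
	Oid databaseId;
	uint64 counters[N_CITUS_STAT_COUNTERS];
} SavedStatCounters;

static BackendStatCounters *SharedBackendCounters = NULL;  /* via ShmemInitStruct() */
static HTAB *SavedCountersHash = NULL;                      /* via ShmemInitHash() */
static int MyCounterSlot = -1;  /* assumed to get a unique index in [0, MaxBackends) at backend start */

/* runtime path: each backend only touches its own slot, so no lock is taken */
static inline void
IncrementMyStatCounter(CitusStatCounterId counterId)
{
	pg_atomic_fetch_add_u64(&SharedBackendCounters[MyCounterSlot].counters[counterId], 1);
}

/* exit-time path (e.g. a before_shmem_exit callback): fold my counters into the database entry */
static void
FlushMyStatCountersOnExit(int code, Datum arg)
{
	bool found = false;
	SavedStatCounters *entry = (SavedStatCounters *) hash_search(SavedCountersHash,
																 &MyDatabaseId,
																 HASH_ENTER, &found);

	if (!found)
		memset(entry->counters, 0, sizeof(entry->counters));

	for (int i = 0; i < N_CITUS_STAT_COUNTERS; i++)
	{
		/* also reset the slot, since another backend may reuse it later */
		entry->counters[i] +=
			pg_atomic_exchange_u64(&SharedBackendCounters[MyCounterSlot].counters[i], 0);
	}
}
```

In this sketch, a view like `citus_stat_counters` would sum the live slots in `SharedBackendCounters` for a database together with the saved totals in `SavedCountersHash`, which is the aggregation described in step 3 above.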
Also, as of writing this section, it seems quite likely that Postgres will expose its Cumulative Statistics infrastructure starting with Postgres 18, see https://github.com/postgres/postgres/commit/7949d9594582ab49dee221e1db1aa5401ace49d4.
Once this happens, we can also consider using that infrastructure to track Citus stat counters. However, we can only do so once we drop support for Postgres versions older than 18.
### A side note on query stat counters
Initially, we were thinking of tracking query stat counters at the very end of planning, namely in the `FinalizePlan()` function. However, that would mean not tracking executions of prepared statements, because a prepared statement is not planned again once its plan is cached. To give a bit more detail, the query plan for a prepared statement is typically cached once the same prepared statement has been executed five times by Postgres (a hard-coded value). A plan may even be cached after the first execution if it is straightforward enough (e.g., when it doesn't have any parameters).
For this reason, we track the query stat counters at appropriate places within the CustomScan implementations provided by Citus for the adaptive executor and the non-pushable INSERT .. SELECT / MERGE executors.
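As a rough, hypothetical illustration of that choice (the callback, helper, and counter names below are stand-ins rather than the actual Citus symbols), the increment lives in a CustomScan callback that runs at execution time, so cached plans are still counted on every execution:

```c
#include "postgres.h"

#include "executor/executor.h"   /* EXEC_FLAG_EXPLAIN_ONLY */
#include "nodes/execnodes.h"     /* CustomScanState, EState */

/* hypothetical stand-ins for the stat counter API sketched earlier */
typedef enum { STAT_QUERY_EXECUTION } CitusStatCounterId;
extern void IncrementMyStatCounter(CitusStatCounterId counterId);
extern bool EnableStatCounters;

/*
 * Hypothetical CustomScan "begin" callback. It runs once per execution of the
 * (possibly cached) plan, so executions of prepared statements are counted as
 * well, which a planner-time hook such as FinalizePlan() would miss.
 */
static void
ExampleDistributedBeginScan(CustomScanState *node, EState *estate, int eflags)
{
	/* don't count plain EXPLAIN, which initializes the node without running it */
	if (EnableStatCounters && (eflags & EXEC_FLAG_EXPLAIN_ONLY) == 0)
	{
		IncrementMyStatCounter(STAT_QUERY_EXECUTION);
	}

	/* ... set up the distributed execution here ... */
}
```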

View File

@ -22,6 +22,8 @@
#include "utils/rel.h" #include "utils/rel.h"
#include "utils/typcache.h" #include "utils/typcache.h"
#include "pg_version_constants.h"
PG_MODULE_MAGIC; PG_MODULE_MAGIC;
extern void _PG_output_plugin_init(OutputPluginCallbacks *cb); extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
@ -435,6 +437,74 @@ TranslateChangesIfSchemaChanged(Relation sourceRelation, Relation targetRelation
return; return;
} }
#if PG_VERSION_NUM >= PG_VERSION_17
/* Check the ReorderBufferChange's action type and handle it accordingly. */
switch (change->action)
{
case REORDER_BUFFER_CHANGE_INSERT:
{
/* For insert action, only the new tuple needs to be translated. */
HeapTuple sourceRelationNewTuple = change->data.tp.newtuple;
HeapTuple targetRelationNewTuple = GetTupleForTargetSchemaForCdc(
sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc);
change->data.tp.newtuple = targetRelationNewTuple;
break;
}
/*
* For update changes both old and new tuples need to be translated for target relation
* if the REPLICA IDENTITY is set to FULL. Otherwise, only the new tuple needs to be
* translated for target relation.
*/
case REORDER_BUFFER_CHANGE_UPDATE:
{
/* For update action, the new tuple should always be translated. */
/* Get the new tuple from the ReorderBufferChange, and translate it to target relation. */
HeapTuple sourceRelationNewTuple = change->data.tp.newtuple;
HeapTuple targetRelationNewTuple = GetTupleForTargetSchemaForCdc(
sourceRelationNewTuple, sourceRelationDesc, targetRelationDesc);
change->data.tp.newtuple = targetRelationNewTuple;
/*
* Format oldtuple according to the target relation. If the column values of replica
* identity change, then the old tuple is non-null and needs to be formatted according
* to the target relation schema.
*/
if (change->data.tp.oldtuple != NULL)
{
HeapTuple sourceRelationOldTuple = change->data.tp.oldtuple;
HeapTuple targetRelationOldTuple = GetTupleForTargetSchemaForCdc(
sourceRelationOldTuple,
sourceRelationDesc,
targetRelationDesc);
change->data.tp.oldtuple = targetRelationOldTuple;
}
break;
}
case REORDER_BUFFER_CHANGE_DELETE:
{
/* For delete action, only the old tuple needs to be translated. */
HeapTuple sourceRelationOldTuple = change->data.tp.oldtuple;
HeapTuple targetRelationOldTuple = GetTupleForTargetSchemaForCdc(
sourceRelationOldTuple,
sourceRelationDesc,
targetRelationDesc);
change->data.tp.oldtuple = targetRelationOldTuple;
break;
}
default:
{
/* Do nothing for other action types. */
break;
}
}
#else
/* Check the ReorderBufferChange's action type and handle them accordingly.*/ /* Check the ReorderBufferChange's action type and handle them accordingly.*/
switch (change->action) switch (change->action)
{ {
@ -499,4 +569,5 @@ TranslateChangesIfSchemaChanged(Relation sourceRelation, Relation targetRelation
break; break;
} }
} }
#endif
} }

View File

@ -346,12 +346,12 @@ CdcIsReferenceTableViaCatalog(Oid relationId)
return false; return false;
} }
Datum datumArray[Natts_pg_dist_partition];
bool isNullArray[Natts_pg_dist_partition];
Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock); Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition); TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition);
Datum *datumArray = (Datum *) palloc(tupleDescriptor->natts * sizeof(Datum));
bool *isNullArray = (bool *) palloc(tupleDescriptor->natts * sizeof(bool));
heap_deform_tuple(partitionTuple, tupleDescriptor, datumArray, isNullArray); heap_deform_tuple(partitionTuple, tupleDescriptor, datumArray, isNullArray);
if (isNullArray[Anum_pg_dist_partition_partmethod - 1] || if (isNullArray[Anum_pg_dist_partition_partmethod - 1] ||
@ -363,6 +363,8 @@ CdcIsReferenceTableViaCatalog(Oid relationId)
*/ */
heap_freetuple(partitionTuple); heap_freetuple(partitionTuple);
table_close(pgDistPartition, NoLock); table_close(pgDistPartition, NoLock);
pfree(datumArray);
pfree(isNullArray);
return false; return false;
} }
@ -374,6 +376,8 @@ CdcIsReferenceTableViaCatalog(Oid relationId)
heap_freetuple(partitionTuple); heap_freetuple(partitionTuple);
table_close(pgDistPartition, NoLock); table_close(pgDistPartition, NoLock);
pfree(datumArray);
pfree(isNullArray);
/* /*
* A table is a reference table when its partition method is 'none' * A table is a reference table when its partition method is 'none'

View File

@ -1,6 +1,6 @@
# Citus extension # Citus extension
comment = 'Citus distributed database' comment = 'Citus distributed database'
default_version = '12.2-1' default_version = '13.2-1'
module_pathname = '$libdir/citus' module_pathname = '$libdir/citus'
relocatable = false relocatable = false
schema = pg_catalog schema = pg_catalog

View File

@ -145,17 +145,6 @@ LogicalClockShmemSize(void)
void void
InitializeClusterClockMem(void) InitializeClusterClockMem(void)
{ {
/* On PG 15 and above, we use shmem_request_hook_type */
#if PG_VERSION_NUM < PG_VERSION_15
/* allocate shared memory for pre PG-15 versions */
if (!IsUnderPostmaster)
{
RequestAddinShmemSpace(LogicalClockShmemSize());
}
#endif
prev_shmem_startup_hook = shmem_startup_hook; prev_shmem_startup_hook = shmem_startup_hook;
shmem_startup_hook = LogicalClockShmemInit; shmem_startup_hook = LogicalClockShmemInit;
} }
@ -328,7 +317,7 @@ GetHighestClockInTransaction(List *nodeConnectionList)
{ {
MultiConnection *connection = NULL; MultiConnection *connection = NULL;
foreach_ptr(connection, nodeConnectionList) foreach_declared_ptr(connection, nodeConnectionList)
{ {
int querySent = int querySent =
SendRemoteCommand(connection, "SELECT citus_get_node_clock();"); SendRemoteCommand(connection, "SELECT citus_get_node_clock();");
@ -349,7 +338,7 @@ GetHighestClockInTransaction(List *nodeConnectionList)
globalClockValue->counter))); globalClockValue->counter)));
/* fetch the results and pick the highest clock value of all the nodes */ /* fetch the results and pick the highest clock value of all the nodes */
foreach_ptr(connection, nodeConnectionList) foreach_declared_ptr(connection, nodeConnectionList)
{ {
bool raiseInterrupts = true; bool raiseInterrupts = true;
@ -431,6 +420,11 @@ PrepareAndSetTransactionClock(void)
MultiConnection *connection = dlist_container(MultiConnection, transactionNode, MultiConnection *connection = dlist_container(MultiConnection, transactionNode,
iter.cur); iter.cur);
WorkerNode *workerNode = FindWorkerNode(connection->hostname, connection->port); WorkerNode *workerNode = FindWorkerNode(connection->hostname, connection->port);
if (!workerNode)
{
ereport(WARNING, errmsg("Worker node is missing"));
continue;
}
/* Skip the node if we already in the list */ /* Skip the node if we already in the list */
if (list_member_int(nodeList, workerNode->groupId)) if (list_member_int(nodeList, workerNode->groupId))

View File

@ -414,7 +414,7 @@ UndistributeTables(List *relationIdList)
*/ */
List *originalForeignKeyRecreationCommands = NIL; List *originalForeignKeyRecreationCommands = NIL;
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
List *fkeyCommandsForRelation = List *fkeyCommandsForRelation =
GetFKeyCreationCommandsRelationInvolvedWithTableType(relationId, GetFKeyCreationCommandsRelationInvolvedWithTableType(relationId,
@ -802,7 +802,7 @@ ConvertTableInternal(TableConversionState *con)
List *partitionList = PartitionList(con->relationId); List *partitionList = PartitionList(con->relationId);
Oid partitionRelationId = InvalidOid; Oid partitionRelationId = InvalidOid;
foreach_oid(partitionRelationId, partitionList) foreach_declared_oid(partitionRelationId, partitionList)
{ {
char *tableQualifiedName = generate_qualified_relation_name( char *tableQualifiedName = generate_qualified_relation_name(
partitionRelationId); partitionRelationId);
@ -873,7 +873,7 @@ ConvertTableInternal(TableConversionState *con)
} }
TableDDLCommand *tableCreationCommand = NULL; TableDDLCommand *tableCreationCommand = NULL;
foreach_ptr(tableCreationCommand, preLoadCommands) foreach_declared_ptr(tableCreationCommand, preLoadCommands)
{ {
Assert(CitusIsA(tableCreationCommand, TableDDLCommand)); Assert(CitusIsA(tableCreationCommand, TableDDLCommand));
@ -947,7 +947,7 @@ ConvertTableInternal(TableConversionState *con)
con->suppressNoticeMessages); con->suppressNoticeMessages);
TableDDLCommand *tableConstructionCommand = NULL; TableDDLCommand *tableConstructionCommand = NULL;
foreach_ptr(tableConstructionCommand, postLoadCommands) foreach_declared_ptr(tableConstructionCommand, postLoadCommands)
{ {
Assert(CitusIsA(tableConstructionCommand, TableDDLCommand)); Assert(CitusIsA(tableConstructionCommand, TableDDLCommand));
char *tableConstructionSQL = GetTableDDLCommand(tableConstructionCommand); char *tableConstructionSQL = GetTableDDLCommand(tableConstructionCommand);
@ -965,7 +965,7 @@ ConvertTableInternal(TableConversionState *con)
MemoryContext oldContext = MemoryContextSwitchTo(citusPerPartitionContext); MemoryContext oldContext = MemoryContextSwitchTo(citusPerPartitionContext);
char *attachPartitionCommand = NULL; char *attachPartitionCommand = NULL;
foreach_ptr(attachPartitionCommand, attachPartitionCommands) foreach_declared_ptr(attachPartitionCommand, attachPartitionCommands)
{ {
MemoryContextReset(citusPerPartitionContext); MemoryContextReset(citusPerPartitionContext);
@ -990,7 +990,7 @@ ConvertTableInternal(TableConversionState *con)
/* For now we only support cascade to colocation for alter_distributed_table UDF */ /* For now we only support cascade to colocation for alter_distributed_table UDF */
Assert(con->conversionType == ALTER_DISTRIBUTED_TABLE); Assert(con->conversionType == ALTER_DISTRIBUTED_TABLE);
foreach_oid(colocatedTableId, con->colocatedTableList) foreach_declared_oid(colocatedTableId, con->colocatedTableList)
{ {
if (colocatedTableId == con->relationId) if (colocatedTableId == con->relationId)
{ {
@ -1018,7 +1018,7 @@ ConvertTableInternal(TableConversionState *con)
if (con->cascadeToColocated != CASCADE_TO_COLOCATED_NO_ALREADY_CASCADED) if (con->cascadeToColocated != CASCADE_TO_COLOCATED_NO_ALREADY_CASCADED)
{ {
char *foreignKeyCommand = NULL; char *foreignKeyCommand = NULL;
foreach_ptr(foreignKeyCommand, foreignKeyCommands) foreach_declared_ptr(foreignKeyCommand, foreignKeyCommands)
{ {
ExecuteQueryViaSPI(foreignKeyCommand, SPI_OK_UTILITY); ExecuteQueryViaSPI(foreignKeyCommand, SPI_OK_UTILITY);
} }
@ -1054,7 +1054,7 @@ CopyTableConversionReturnIntoCurrentContext(TableConversionReturn *tableConversi
tableConversionReturnCopy = palloc0(sizeof(TableConversionReturn)); tableConversionReturnCopy = palloc0(sizeof(TableConversionReturn));
List *copyForeignKeyCommands = NIL; List *copyForeignKeyCommands = NIL;
char *foreignKeyCommand = NULL; char *foreignKeyCommand = NULL;
foreach_ptr(foreignKeyCommand, tableConversionReturn->foreignKeyCommands) foreach_declared_ptr(foreignKeyCommand, tableConversionReturn->foreignKeyCommands)
{ {
char *copyForeignKeyCommand = MemoryContextStrdup(CurrentMemoryContext, char *copyForeignKeyCommand = MemoryContextStrdup(CurrentMemoryContext,
foreignKeyCommand); foreignKeyCommand);
@ -1129,7 +1129,7 @@ DropIndexesNotSupportedByColumnar(Oid relationId, bool suppressNoticeMessages)
RelationClose(columnarRelation); RelationClose(columnarRelation);
Oid indexId = InvalidOid; Oid indexId = InvalidOid;
foreach_oid(indexId, indexIdList) foreach_declared_oid(indexId, indexIdList)
{ {
char *indexAmName = GetIndexAccessMethodName(indexId); char *indexAmName = GetIndexAccessMethodName(indexId);
if (extern_ColumnarSupportsIndexAM(indexAmName)) if (extern_ColumnarSupportsIndexAM(indexAmName))
@ -1389,7 +1389,7 @@ CreateTableConversion(TableConversionParameters *params)
* since they will be handled separately. * since they will be handled separately.
*/ */
Oid colocatedTableId = InvalidOid; Oid colocatedTableId = InvalidOid;
foreach_oid(colocatedTableId, colocatedTableList) foreach_declared_oid(colocatedTableId, colocatedTableList)
{ {
if (PartitionTable(colocatedTableId)) if (PartitionTable(colocatedTableId))
{ {
@ -1605,7 +1605,7 @@ DoesCascadeDropUnsupportedObject(Oid classId, Oid objectId, HTAB *nodeMap)
targetObjectId); targetObjectId);
HeapTuple depTup = NULL; HeapTuple depTup = NULL;
foreach_ptr(depTup, dependencyTupleList) foreach_declared_ptr(depTup, dependencyTupleList)
{ {
Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup); Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
@ -1645,7 +1645,7 @@ GetViewCreationCommandsOfTable(Oid relationId)
List *commands = NIL; List *commands = NIL;
Oid viewOid = InvalidOid; Oid viewOid = InvalidOid;
foreach_oid(viewOid, views) foreach_declared_oid(viewOid, views)
{ {
StringInfo query = makeStringInfo(); StringInfo query = makeStringInfo();
@ -1683,7 +1683,7 @@ WrapTableDDLCommands(List *commandStrings)
List *tableDDLCommands = NIL; List *tableDDLCommands = NIL;
char *command = NULL; char *command = NULL;
foreach_ptr(command, commandStrings) foreach_declared_ptr(command, commandStrings)
{ {
tableDDLCommands = lappend(tableDDLCommands, makeTableDDLCommandString(command)); tableDDLCommands = lappend(tableDDLCommands, makeTableDDLCommandString(command));
} }
@ -1840,7 +1840,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
*/ */
List *ownedSequences = getOwnedSequences_internal(sourceId, 0, DEPENDENCY_AUTO); List *ownedSequences = getOwnedSequences_internal(sourceId, 0, DEPENDENCY_AUTO);
Oid sequenceOid = InvalidOid; Oid sequenceOid = InvalidOid;
foreach_oid(sequenceOid, ownedSequences) foreach_declared_oid(sequenceOid, ownedSequences)
{ {
changeDependencyFor(RelationRelationId, sequenceOid, changeDependencyFor(RelationRelationId, sequenceOid,
RelationRelationId, sourceId, targetId); RelationRelationId, sourceId, targetId);
@ -1873,7 +1873,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands,
} }
char *justBeforeDropCommand = NULL; char *justBeforeDropCommand = NULL;
foreach_ptr(justBeforeDropCommand, justBeforeDropCommands) foreach_declared_ptr(justBeforeDropCommand, justBeforeDropCommands)
{ {
ExecuteQueryViaSPI(justBeforeDropCommand, SPI_OK_UTILITY); ExecuteQueryViaSPI(justBeforeDropCommand, SPI_OK_UTILITY);
} }
@ -1987,7 +1987,7 @@ CheckAlterDistributedTableConversionParameters(TableConversionState *con)
Oid colocatedTableOid = InvalidOid; Oid colocatedTableOid = InvalidOid;
text *colocateWithText = cstring_to_text(con->colocateWith); text *colocateWithText = cstring_to_text(con->colocateWith);
Oid colocateWithTableOid = ResolveRelationId(colocateWithText, false); Oid colocateWithTableOid = ResolveRelationId(colocateWithText, false);
foreach_oid(colocatedTableOid, con->colocatedTableList) foreach_declared_oid(colocatedTableOid, con->colocatedTableList)
{ {
if (colocateWithTableOid == colocatedTableOid) if (colocateWithTableOid == colocatedTableOid)
{ {
@ -2214,7 +2214,7 @@ WillRecreateForeignKeyToReferenceTable(Oid relationId,
{ {
List *colocatedTableList = ColocatedTableList(relationId); List *colocatedTableList = ColocatedTableList(relationId);
Oid colocatedTableOid = InvalidOid; Oid colocatedTableOid = InvalidOid;
foreach_oid(colocatedTableOid, colocatedTableList) foreach_declared_oid(colocatedTableOid, colocatedTableList)
{ {
if (HasForeignKeyToReferenceTable(colocatedTableOid)) if (HasForeignKeyToReferenceTable(colocatedTableOid))
{ {
@ -2242,7 +2242,7 @@ WarningsForDroppingForeignKeysWithDistributedTables(Oid relationId)
List *foreignKeys = list_concat(referencingForeingKeys, referencedForeignKeys); List *foreignKeys = list_concat(referencingForeingKeys, referencedForeignKeys);
Oid foreignKeyOid = InvalidOid; Oid foreignKeyOid = InvalidOid;
foreach_oid(foreignKeyOid, foreignKeys) foreach_declared_oid(foreignKeyOid, foreignKeys)
{ {
ereport(WARNING, (errmsg("foreign key %s will be dropped", ereport(WARNING, (errmsg("foreign key %s will be dropped",
get_constraint_name(foreignKeyOid)))); get_constraint_name(foreignKeyOid))));

View File

@ -33,7 +33,7 @@ SaveBeginCommandProperties(TransactionStmt *transactionStmt)
* *
* While BEGIN can be quite frequent it will rarely have options set. * While BEGIN can be quite frequent it will rarely have options set.
*/ */
foreach_ptr(item, transactionStmt->options) foreach_declared_ptr(item, transactionStmt->options)
{ {
A_Const *constant = (A_Const *) item->arg; A_Const *constant = (A_Const *) item->arg;

View File

@ -168,7 +168,7 @@ GetPartitionRelationIds(List *relationIdList)
List *partitionRelationIdList = NIL; List *partitionRelationIdList = NIL;
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
if (PartitionTable(relationId)) if (PartitionTable(relationId))
{ {
@ -189,7 +189,7 @@ LockRelationsWithLockMode(List *relationIdList, LOCKMODE lockMode)
{ {
Oid relationId; Oid relationId;
relationIdList = SortList(relationIdList, CompareOids); relationIdList = SortList(relationIdList, CompareOids);
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
LockRelationOid(relationId, lockMode); LockRelationOid(relationId, lockMode);
} }
@ -207,7 +207,7 @@ static void
ErrorIfConvertingMultiLevelPartitionedTable(List *relationIdList) ErrorIfConvertingMultiLevelPartitionedTable(List *relationIdList)
{ {
Oid relationId; Oid relationId;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
if (PartitionedTable(relationId) && PartitionTable(relationId)) if (PartitionedTable(relationId) && PartitionTable(relationId))
{ {
@ -236,7 +236,7 @@ void
ErrorIfAnyPartitionRelationInvolvedInNonInheritedFKey(List *relationIdList) ErrorIfAnyPartitionRelationInvolvedInNonInheritedFKey(List *relationIdList)
{ {
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
if (!PartitionTable(relationId)) if (!PartitionTable(relationId))
{ {
@ -300,7 +300,7 @@ bool
RelationIdListHasReferenceTable(List *relationIdList) RelationIdListHasReferenceTable(List *relationIdList)
{ {
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
if (IsCitusTableType(relationId, REFERENCE_TABLE)) if (IsCitusTableType(relationId, REFERENCE_TABLE))
{ {
@ -322,7 +322,7 @@ GetFKeyCreationCommandsForRelationIdList(List *relationIdList)
List *fKeyCreationCommands = NIL; List *fKeyCreationCommands = NIL;
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
List *relationFKeyCreationCommands = List *relationFKeyCreationCommands =
GetReferencingForeignConstaintCommands(relationId); GetReferencingForeignConstaintCommands(relationId);
@ -342,7 +342,7 @@ static void
DropRelationIdListForeignKeys(List *relationIdList, int fKeyFlags) DropRelationIdListForeignKeys(List *relationIdList, int fKeyFlags)
{ {
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
DropRelationForeignKeys(relationId, fKeyFlags); DropRelationForeignKeys(relationId, fKeyFlags);
} }
@ -399,7 +399,7 @@ GetRelationDropFkeyCommands(Oid relationId, int fKeyFlags)
List *relationFKeyIdList = GetForeignKeyOids(relationId, fKeyFlags); List *relationFKeyIdList = GetForeignKeyOids(relationId, fKeyFlags);
Oid foreignKeyId; Oid foreignKeyId;
foreach_oid(foreignKeyId, relationFKeyIdList) foreach_declared_oid(foreignKeyId, relationFKeyIdList)
{ {
char *dropFkeyCascadeCommand = GetDropFkeyCascadeCommand(foreignKeyId); char *dropFkeyCascadeCommand = GetDropFkeyCascadeCommand(foreignKeyId);
dropFkeyCascadeCommandList = lappend(dropFkeyCascadeCommandList, dropFkeyCascadeCommandList = lappend(dropFkeyCascadeCommandList,
@ -450,7 +450,7 @@ ExecuteCascadeOperationForRelationIdList(List *relationIdList,
cascadeOperationType) cascadeOperationType)
{ {
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
/* /*
* The reason behind skipping certain table types in below loop is * The reason behind skipping certain table types in below loop is
@ -531,7 +531,7 @@ ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandL
PG_TRY(); PG_TRY();
{ {
char *utilityCommand = NULL; char *utilityCommand = NULL;
foreach_ptr(utilityCommand, utilityCommandList) foreach_declared_ptr(utilityCommand, utilityCommandList)
{ {
/* /*
* CREATE MATERIALIZED VIEW commands need to be parsed/transformed, * CREATE MATERIALIZED VIEW commands need to be parsed/transformed,
@ -569,7 +569,7 @@ void
ExecuteAndLogUtilityCommandList(List *utilityCommandList) ExecuteAndLogUtilityCommandList(List *utilityCommandList)
{ {
char *utilityCommand = NULL; char *utilityCommand = NULL;
foreach_ptr(utilityCommand, utilityCommandList) foreach_declared_ptr(utilityCommand, utilityCommandList)
{ {
ExecuteAndLogUtilityCommand(utilityCommand); ExecuteAndLogUtilityCommand(utilityCommand);
} }
@ -597,7 +597,7 @@ void
ExecuteForeignKeyCreateCommandList(List *ddlCommandList, bool skip_validation) ExecuteForeignKeyCreateCommandList(List *ddlCommandList, bool skip_validation)
{ {
char *ddlCommand = NULL; char *ddlCommand = NULL;
foreach_ptr(ddlCommand, ddlCommandList) foreach_declared_ptr(ddlCommand, ddlCommandList)
{ {
ExecuteForeignKeyCreateCommand(ddlCommand, skip_validation); ExecuteForeignKeyCreateCommand(ddlCommand, skip_validation);
} }

View File

@ -588,7 +588,7 @@ ErrorIfOptionListHasNoTableName(List *optionList)
{ {
char *table_nameString = "table_name"; char *table_nameString = "table_name";
DefElem *option = NULL; DefElem *option = NULL;
foreach_ptr(option, optionList) foreach_declared_ptr(option, optionList)
{ {
char *optionName = option->defname; char *optionName = option->defname;
if (strcmp(optionName, table_nameString) == 0) if (strcmp(optionName, table_nameString) == 0)
@ -613,7 +613,7 @@ ForeignTableDropsTableNameOption(List *optionList)
{ {
char *table_nameString = "table_name"; char *table_nameString = "table_name";
DefElem *option = NULL; DefElem *option = NULL;
foreach_ptr(option, optionList) foreach_declared_ptr(option, optionList)
{ {
char *optionName = option->defname; char *optionName = option->defname;
DefElemAction optionAction = option->defaction; DefElemAction optionAction = option->defaction;
@ -732,7 +732,7 @@ UpdateAutoConvertedForConnectedRelations(List *relationIds, bool autoConverted)
List *relationIdList = NIL; List *relationIdList = NIL;
Oid relid = InvalidOid; Oid relid = InvalidOid;
foreach_oid(relid, relationIds) foreach_declared_oid(relid, relationIds)
{ {
List *connectedRelations = GetForeignKeyConnectedRelationIdList(relid); List *connectedRelations = GetForeignKeyConnectedRelationIdList(relid);
relationIdList = list_concat_unique_oid(relationIdList, connectedRelations); relationIdList = list_concat_unique_oid(relationIdList, connectedRelations);
@ -740,7 +740,7 @@ UpdateAutoConvertedForConnectedRelations(List *relationIds, bool autoConverted)
relationIdList = SortList(relationIdList, CompareOids); relationIdList = SortList(relationIdList, CompareOids);
foreach_oid(relid, relationIdList) foreach_declared_oid(relid, relationIdList)
{ {
UpdatePgDistPartitionAutoConverted(relid, autoConverted); UpdatePgDistPartitionAutoConverted(relid, autoConverted);
} }
@ -776,7 +776,7 @@ GetShellTableDDLEventsForCitusLocalTable(Oid relationId)
List *shellTableDDLEvents = NIL; List *shellTableDDLEvents = NIL;
TableDDLCommand *tableDDLCommand = NULL; TableDDLCommand *tableDDLCommand = NULL;
foreach_ptr(tableDDLCommand, tableDDLCommands) foreach_declared_ptr(tableDDLCommand, tableDDLCommands)
{ {
Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); Assert(CitusIsA(tableDDLCommand, TableDDLCommand));
shellTableDDLEvents = lappend(shellTableDDLEvents, shellTableDDLEvents = lappend(shellTableDDLEvents,
@ -863,7 +863,7 @@ RenameShardRelationConstraints(Oid shardRelationId, uint64 shardId)
List *constraintNameList = GetConstraintNameList(shardRelationId); List *constraintNameList = GetConstraintNameList(shardRelationId);
char *constraintName = NULL; char *constraintName = NULL;
foreach_ptr(constraintName, constraintNameList) foreach_declared_ptr(constraintName, constraintNameList)
{ {
const char *commandString = const char *commandString =
GetRenameShardConstraintCommand(shardRelationId, constraintName, shardId); GetRenameShardConstraintCommand(shardRelationId, constraintName, shardId);
@ -958,7 +958,7 @@ RenameShardRelationIndexes(Oid shardRelationId, uint64 shardId)
List *indexOidList = GetExplicitIndexOidList(shardRelationId); List *indexOidList = GetExplicitIndexOidList(shardRelationId);
Oid indexOid = InvalidOid; Oid indexOid = InvalidOid;
foreach_oid(indexOid, indexOidList) foreach_declared_oid(indexOid, indexOidList)
{ {
const char *commandString = GetRenameShardIndexCommand(indexOid, shardId); const char *commandString = GetRenameShardIndexCommand(indexOid, shardId);
ExecuteAndLogUtilityCommand(commandString); ExecuteAndLogUtilityCommand(commandString);
@ -1008,7 +1008,7 @@ RenameShardRelationStatistics(Oid shardRelationId, uint64 shardId)
List *statsCommandList = GetRenameStatsCommandList(statsOidList, shardId); List *statsCommandList = GetRenameStatsCommandList(statsOidList, shardId);
char *command = NULL; char *command = NULL;
foreach_ptr(command, statsCommandList) foreach_declared_ptr(command, statsCommandList)
{ {
ExecuteAndLogUtilityCommand(command); ExecuteAndLogUtilityCommand(command);
} }
@ -1044,7 +1044,7 @@ RenameShardRelationNonTruncateTriggers(Oid shardRelationId, uint64 shardId)
List *triggerIdList = GetExplicitTriggerIdList(shardRelationId); List *triggerIdList = GetExplicitTriggerIdList(shardRelationId);
Oid triggerId = InvalidOid; Oid triggerId = InvalidOid;
foreach_oid(triggerId, triggerIdList) foreach_declared_oid(triggerId, triggerIdList)
{ {
bool missingOk = false; bool missingOk = false;
HeapTuple triggerTuple = GetTriggerTupleById(triggerId, missingOk); HeapTuple triggerTuple = GetTriggerTupleById(triggerId, missingOk);
@ -1097,7 +1097,7 @@ DropRelationTruncateTriggers(Oid relationId)
List *triggerIdList = GetExplicitTriggerIdList(relationId); List *triggerIdList = GetExplicitTriggerIdList(relationId);
Oid triggerId = InvalidOid; Oid triggerId = InvalidOid;
foreach_oid(triggerId, triggerIdList) foreach_declared_oid(triggerId, triggerIdList)
{ {
bool missingOk = false; bool missingOk = false;
HeapTuple triggerTuple = GetTriggerTupleById(triggerId, missingOk); HeapTuple triggerTuple = GetTriggerTupleById(triggerId, missingOk);
@ -1175,7 +1175,7 @@ DropIdentitiesOnTable(Oid relationId)
relation_close(relation, NoLock); relation_close(relation, NoLock);
char *dropCommand = NULL; char *dropCommand = NULL;
foreach_ptr(dropCommand, dropCommandList) foreach_declared_ptr(dropCommand, dropCommandList)
{ {
/* /*
* We need to disable/enable ddl propagation for this command, to prevent * We need to disable/enable ddl propagation for this command, to prevent
@ -1218,7 +1218,7 @@ DropViewsOnTable(Oid relationId)
List *reverseOrderedViews = ReversedOidList(views); List *reverseOrderedViews = ReversedOidList(views);
Oid viewId = InvalidOid; Oid viewId = InvalidOid;
foreach_oid(viewId, reverseOrderedViews) foreach_declared_oid(viewId, reverseOrderedViews)
{ {
char *qualifiedViewName = generate_qualified_relation_name(viewId); char *qualifiedViewName = generate_qualified_relation_name(viewId);
@ -1241,7 +1241,7 @@ ReversedOidList(List *oidList)
{ {
List *reversed = NIL; List *reversed = NIL;
Oid oid = InvalidOid; Oid oid = InvalidOid;
foreach_oid(oid, oidList) foreach_declared_oid(oid, oidList)
{ {
reversed = lcons_oid(oid, reversed); reversed = lcons_oid(oid, reversed);
} }
@ -1293,7 +1293,7 @@ GetRenameStatsCommandList(List *statsOidList, uint64 shardId)
{ {
List *statsCommandList = NIL; List *statsCommandList = NIL;
Oid statsOid; Oid statsOid;
foreach_oid(statsOid, statsOidList) foreach_declared_oid(statsOid, statsOidList)
{ {
HeapTuple tup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statsOid)); HeapTuple tup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statsOid));

View File

@ -115,7 +115,7 @@ static bool
IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt) IsClusterStmtVerbose_compat(ClusterStmt *clusterStmt)
{ {
DefElem *opt = NULL; DefElem *opt = NULL;
foreach_ptr(opt, clusterStmt->params) foreach_declared_ptr(opt, clusterStmt->params)
{ {
if (strcmp(opt->defname, "verbose") == 0) if (strcmp(opt->defname, "verbose") == 0)
{ {

View File

@ -68,8 +68,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
char *collcollate; char *collcollate;
char *collctype; char *collctype;
#if PG_VERSION_NUM >= PG_VERSION_15
/* /*
* In PG15, there is an added option to use ICU as global locale provider. * In PG15, there is an added option to use ICU as global locale provider.
* pg_collation has three locale-related fields: collcollate and collctype, * pg_collation has three locale-related fields: collcollate and collctype,
@ -77,7 +75,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
* ICU-related field. Only the libc-related fields or the ICU-related field * ICU-related field. Only the libc-related fields or the ICU-related field
* is set, never both. * is set, never both.
*/ */
char *colliculocale; char *colllocale;
bool isnull; bool isnull;
Datum datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collcollate, Datum datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collcollate,
@ -101,27 +99,17 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
collctype = NULL; collctype = NULL;
} }
datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_colliculocale, &isnull); datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_colllocale, &isnull);
if (!isnull) if (!isnull)
{ {
colliculocale = TextDatumGetCString(datum); colllocale = TextDatumGetCString(datum);
} }
else else
{ {
colliculocale = NULL; colllocale = NULL;
} }
Assert((collcollate && collctype) || colliculocale); Assert((collcollate && collctype) || colllocale);
#else
/*
* In versions before 15, collcollate and collctype were type "name". Use
* pstrdup() to match the interface of 15 so that we consistently free the
* result later.
*/
collcollate = pstrdup(NameStr(collationForm->collcollate));
collctype = pstrdup(NameStr(collationForm->collctype));
#endif
if (collowner != NULL) if (collowner != NULL)
{ {
@ -132,6 +120,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
char *schemaName = get_namespace_name(collnamespace); char *schemaName = get_namespace_name(collnamespace);
*quotedCollationName = quote_qualified_identifier(schemaName, collname); *quotedCollationName = quote_qualified_identifier(schemaName, collname);
const char *providerString = const char *providerString =
collprovider == COLLPROVIDER_BUILTIN ? "builtin" :
collprovider == COLLPROVIDER_DEFAULT ? "default" : collprovider == COLLPROVIDER_DEFAULT ? "default" :
collprovider == COLLPROVIDER_ICU ? "icu" : collprovider == COLLPROVIDER_ICU ? "icu" :
collprovider == COLLPROVIDER_LIBC ? "libc" : NULL; collprovider == COLLPROVIDER_LIBC ? "libc" : NULL;
@ -146,13 +135,12 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
"CREATE COLLATION %s (provider = '%s'", "CREATE COLLATION %s (provider = '%s'",
*quotedCollationName, providerString); *quotedCollationName, providerString);
#if PG_VERSION_NUM >= PG_VERSION_15 if (colllocale)
if (colliculocale)
{ {
appendStringInfo(&collationNameDef, appendStringInfo(&collationNameDef,
", locale = %s", ", locale = %s",
quote_literal_cstr(colliculocale)); quote_literal_cstr(colllocale));
pfree(colliculocale); pfree(colllocale);
} }
else else
{ {
@ -172,24 +160,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
pfree(collcollate); pfree(collcollate);
pfree(collctype); pfree(collctype);
} }
#else
if (strcmp(collcollate, collctype) == 0)
{
appendStringInfo(&collationNameDef,
", locale = %s",
quote_literal_cstr(collcollate));
}
else
{
appendStringInfo(&collationNameDef,
", lc_collate = %s, lc_ctype = %s",
quote_literal_cstr(collcollate),
quote_literal_cstr(collctype));
}
pfree(collcollate);
pfree(collctype);
#endif
#if PG_VERSION_NUM >= PG_VERSION_16 #if PG_VERSION_NUM >= PG_VERSION_16
char *collicurules = NULL; char *collicurules = NULL;
datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collicurules, &isnull); datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collicurules, &isnull);

View File

@ -235,7 +235,7 @@ PreprocessDropDistributedObjectStmt(Node *node, const char *queryString,
List *distributedObjects = NIL; List *distributedObjects = NIL;
List *distributedObjectAddresses = NIL; List *distributedObjectAddresses = NIL;
Node *object = NULL; Node *object = NULL;
foreach_ptr(object, stmt->objects) foreach_declared_ptr(object, stmt->objects)
{ {
/* TODO understand if the lock should be sth else */ /* TODO understand if the lock should be sth else */
Relation rel = NULL; /* not used, but required to pass to get_object_address */ Relation rel = NULL; /* not used, but required to pass to get_object_address */
@ -267,7 +267,7 @@ PreprocessDropDistributedObjectStmt(Node *node, const char *queryString,
* remove the entries for the distributed objects on dropping * remove the entries for the distributed objects on dropping
*/ */
ObjectAddress *address = NULL; ObjectAddress *address = NULL;
foreach_ptr(address, distributedObjectAddresses) foreach_declared_ptr(address, distributedObjectAddresses)
{ {
UnmarkObjectDistributed(address); UnmarkObjectDistributed(address);
} }
@ -303,7 +303,7 @@ DropTextSearchDictObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
List *objectAddresses = NIL; List *objectAddresses = NIL;
List *objNameList = NIL; List *objNameList = NIL;
foreach_ptr(objNameList, stmt->objects) foreach_declared_ptr(objNameList, stmt->objects)
{ {
Oid tsdictOid = get_ts_dict_oid(objNameList, missing_ok); Oid tsdictOid = get_ts_dict_oid(objNameList, missing_ok);
@ -328,7 +328,7 @@ DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, bool isPostproces
List *objectAddresses = NIL; List *objectAddresses = NIL;
List *objNameList = NIL; List *objNameList = NIL;
foreach_ptr(objNameList, stmt->objects) foreach_declared_ptr(objNameList, stmt->objects)
{ {
Oid tsconfigOid = get_ts_config_oid(objNameList, missing_ok); Oid tsconfigOid = get_ts_config_oid(objNameList, missing_ok);

View File

@ -170,12 +170,10 @@ static void EnsureDistributedSequencesHaveOneType(Oid relationId,
static void CopyLocalDataIntoShards(Oid distributedTableId); static void CopyLocalDataIntoShards(Oid distributedTableId);
static List * TupleDescColumnNameList(TupleDesc tupleDescriptor); static List * TupleDescColumnNameList(TupleDesc tupleDescriptor);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc, static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
Var *distributionColumn); Var *distributionColumn);
static int numeric_typmod_scale(int32 typmod); static int numeric_typmod_scale(int32 typmod);
static bool is_valid_numeric_typmod(int32 typmod); static bool is_valid_numeric_typmod(int32 typmod);
#endif
static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc, static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
Var *distributionColumn); Var *distributionColumn);
@ -834,7 +832,7 @@ HashSplitPointsForShardList(List *shardList)
List *splitPointList = NIL; List *splitPointList = NIL;
ShardInterval *shardInterval = NULL; ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardList) foreach_declared_ptr(shardInterval, shardList)
{ {
int32 shardMaxValue = DatumGetInt32(shardInterval->maxValue); int32 shardMaxValue = DatumGetInt32(shardInterval->maxValue);
@ -890,7 +888,7 @@ WorkerNodesForShardList(List *shardList)
List *nodeIdList = NIL; List *nodeIdList = NIL;
ShardInterval *shardInterval = NULL; ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardList) foreach_declared_ptr(shardInterval, shardList)
{ {
WorkerNode *workerNode = ActiveShardPlacementWorkerNode(shardInterval->shardId); WorkerNode *workerNode = ActiveShardPlacementWorkerNode(shardInterval->shardId);
nodeIdList = lappend_int(nodeIdList, workerNode->nodeId); nodeIdList = lappend_int(nodeIdList, workerNode->nodeId);
@ -1337,7 +1335,7 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
ALLOCSET_DEFAULT_SIZES); ALLOCSET_DEFAULT_SIZES);
MemoryContext oldContext = MemoryContextSwitchTo(citusPartitionContext); MemoryContext oldContext = MemoryContextSwitchTo(citusPartitionContext);
foreach_oid(partitionRelationId, partitionList) foreach_declared_oid(partitionRelationId, partitionList)
{ {
MemoryContextReset(citusPartitionContext); MemoryContextReset(citusPartitionContext);
@ -1551,7 +1549,7 @@ ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType,
MemoryContext oldContext = MemoryContextSwitchTo(citusPartitionContext); MemoryContext oldContext = MemoryContextSwitchTo(citusPartitionContext);
Oid partitionRelationId = InvalidOid; Oid partitionRelationId = InvalidOid;
foreach_oid(partitionRelationId, partitionList) foreach_declared_oid(partitionRelationId, partitionList)
{ {
MemoryContextReset(citusPartitionContext); MemoryContextReset(citusPartitionContext);
@ -1701,7 +1699,7 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId
Oid attrDefOid; Oid attrDefOid;
List *attrDefOids = GetAttrDefsFromSequence(seqOid); List *attrDefOids = GetAttrDefsFromSequence(seqOid);
foreach_oid(attrDefOid, attrDefOids) foreach_declared_oid(attrDefOid, attrDefOids)
{ {
ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid); ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid);
@ -1783,7 +1781,7 @@ static void
EnsureDistributedSequencesHaveOneType(Oid relationId, List *seqInfoList) EnsureDistributedSequencesHaveOneType(Oid relationId, List *seqInfoList)
{ {
SequenceInfo *seqInfo = NULL; SequenceInfo *seqInfo = NULL;
foreach_ptr(seqInfo, seqInfoList) foreach_declared_ptr(seqInfo, seqInfoList)
{ {
if (!seqInfo->isNextValDefault) if (!seqInfo->isNextValDefault)
{ {
@ -2114,8 +2112,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
"AS (...) STORED."))); "AS (...) STORED.")));
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* verify target relation is not distributed by a column of type numeric with negative scale */ /* verify target relation is not distributed by a column of type numeric with negative scale */
if (distributionMethod != DISTRIBUTE_BY_NONE && if (distributionMethod != DISTRIBUTE_BY_NONE &&
DistributionColumnUsesNumericColumnNegativeScale(relationDesc, DistributionColumnUsesNumericColumnNegativeScale(relationDesc,
@ -2126,7 +2122,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
errdetail("Distribution column must not use numeric type " errdetail("Distribution column must not use numeric type "
"with negative scale"))); "with negative scale")));
} }
#endif
/* check for support function needed by specified partition method */ /* check for support function needed by specified partition method */
if (distributionMethod == DISTRIBUTE_BY_HASH) if (distributionMethod == DISTRIBUTE_BY_HASH)
@ -2732,11 +2727,15 @@ CopyFromLocalTableIntoDistTable(Oid localTableId, Oid distributedTableId)
ExprContext *econtext = GetPerTupleExprContext(estate); ExprContext *econtext = GetPerTupleExprContext(estate);
econtext->ecxt_scantuple = slot; econtext->ecxt_scantuple = slot;
const bool nonPublishableData = false; const bool nonPublishableData = false;
/* we don't track query counters when distributing a table */
const bool trackQueryCounters = false;
DestReceiver *copyDest = DestReceiver *copyDest =
(DestReceiver *) CreateCitusCopyDestReceiver(distributedTableId, (DestReceiver *) CreateCitusCopyDestReceiver(distributedTableId,
columnNameList, columnNameList,
partitionColumnIndex, partitionColumnIndex,
estate, NULL, nonPublishableData); estate, NULL, nonPublishableData,
trackQueryCounters);
/* initialise state for writing to shards, we'll open connections on demand */ /* initialise state for writing to shards, we'll open connections on demand */
copyDest->rStartup(copyDest, 0, sourceTupleDescriptor); copyDest->rStartup(copyDest, 0, sourceTupleDescriptor);
@ -2844,8 +2843,6 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* is_valid_numeric_typmod checks if the typmod value is valid * is_valid_numeric_typmod checks if the typmod value is valid
* *
@ -2895,8 +2892,6 @@ DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
} }
#endif
/* /*
* DistributionColumnUsesGeneratedStoredColumn returns whether a given relation uses * DistributionColumnUsesGeneratedStoredColumn returns whether a given relation uses
* GENERATED ALWAYS AS (...) STORED on distribution column * GENERATED ALWAYS AS (...) STORED on distribution column

View File

@ -35,6 +35,7 @@
#include "distributed/adaptive_executor.h" #include "distributed/adaptive_executor.h"
#include "distributed/commands.h" #include "distributed/commands.h"
#include "distributed/commands/serialize_distributed_ddls.h"
#include "distributed/commands/utility_hook.h" #include "distributed/commands/utility_hook.h"
#include "distributed/comment.h" #include "distributed/comment.h"
#include "distributed/deparse_shard_query.h" #include "distributed/deparse_shard_query.h"
@ -46,7 +47,6 @@
#include "distributed/metadata_utility.h" #include "distributed/metadata_utility.h"
#include "distributed/multi_executor.h" #include "distributed/multi_executor.h"
#include "distributed/relation_access_tracking.h" #include "distributed/relation_access_tracking.h"
#include "distributed/serialize_distributed_ddls.h"
#include "distributed/shard_cleaner.h" #include "distributed/shard_cleaner.h"
#include "distributed/worker_protocol.h" #include "distributed/worker_protocol.h"
#include "distributed/worker_transaction.h" #include "distributed/worker_transaction.h"
@ -79,11 +79,8 @@ typedef struct DatabaseCollationInfo
{ {
char *datcollate; char *datcollate;
char *datctype; char *datctype;
#if PG_VERSION_NUM >= PG_VERSION_15
char *daticulocale; char *daticulocale;
char *datcollversion; char *datcollversion;
#endif
#if PG_VERSION_NUM >= PG_VERSION_16 #if PG_VERSION_NUM >= PG_VERSION_16
char *daticurules; char *daticurules;
@ -94,9 +91,7 @@ static char * GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database
databaseForm); databaseForm);
static DatabaseCollationInfo GetDatabaseCollation(Oid dbOid); static DatabaseCollationInfo GetDatabaseCollation(Oid dbOid);
static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid); static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid);
#if PG_VERSION_NUM >= PG_VERSION_15
static char * GetLocaleProviderString(char datlocprovider); static char * GetLocaleProviderString(char datlocprovider);
#endif
static char * GetTablespaceName(Oid tablespaceOid); static char * GetTablespaceName(Oid tablespaceOid);
static ObjectAddress * GetDatabaseAddressFromDatabaseName(char *databaseName, static ObjectAddress * GetDatabaseAddressFromDatabaseName(char *databaseName,
bool missingOk); bool missingOk);
@ -235,7 +230,7 @@ FilterDistributedDatabases(List *databases)
{ {
List *distributedDatabases = NIL; List *distributedDatabases = NIL;
String *databaseName = NULL; String *databaseName = NULL;
foreach_ptr(databaseName, databases) foreach_declared_ptr(databaseName, databases)
{ {
bool missingOk = true; bool missingOk = true;
ObjectAddress *dbAddress = ObjectAddress *dbAddress =
@ -258,7 +253,7 @@ static bool
IsSetTablespaceStatement(AlterDatabaseStmt *stmt) IsSetTablespaceStatement(AlterDatabaseStmt *stmt)
{ {
DefElem *def = NULL; DefElem *def = NULL;
foreach_ptr(def, stmt->options) foreach_declared_ptr(def, stmt->options)
{ {
if (strcmp(def->defname, "tablespace") == 0) if (strcmp(def->defname, "tablespace") == 0)
{ {
@ -320,8 +315,6 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
} }
#if PG_VERSION_NUM >= PG_VERSION_15
/* /*
* PreprocessAlterDatabaseRefreshCollStmt is executed before the statement is applied to * PreprocessAlterDatabaseRefreshCollStmt is executed before the statement is applied to
* the local postgres instance. * the local postgres instance.
@ -359,9 +352,6 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
} }
#endif
/* /*
* PreprocessAlterDatabaseRenameStmt is executed before the statement is applied to * PreprocessAlterDatabaseRenameStmt is executed before the statement is applied to
* the local postgres instance. * the local postgres instance.
@ -510,7 +500,7 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
List *remoteNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock); List *remoteNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock);
WorkerNode *remoteNode = NULL; WorkerNode *remoteNode = NULL;
foreach_ptr(remoteNode, remoteNodes) foreach_declared_ptr(remoteNode, remoteNodes)
{ {
InsertCleanupRecordOutsideTransaction( InsertCleanupRecordOutsideTransaction(
CLEANUP_OBJECT_DATABASE, CLEANUP_OBJECT_DATABASE,
@ -733,7 +723,7 @@ void
EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt) EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt)
{ {
DefElem *option = NULL; DefElem *option = NULL;
foreach_ptr(option, stmt->options) foreach_declared_ptr(option, stmt->options)
{ {
if (strcmp(option->defname, "oid") == 0) if (strcmp(option->defname, "oid") == 0)
{ {
@ -849,9 +839,7 @@ GetDatabaseCollation(Oid dbOid)
Datum ctypeDatum = heap_getattr(tup, Anum_pg_database_datctype, tupdesc, &isNull); Datum ctypeDatum = heap_getattr(tup, Anum_pg_database_datctype, tupdesc, &isNull);
info.datctype = TextDatumGetCString(ctypeDatum); info.datctype = TextDatumGetCString(ctypeDatum);
#if PG_VERSION_NUM >= PG_VERSION_15 Datum icuLocaleDatum = heap_getattr(tup, Anum_pg_database_datlocale, tupdesc,
Datum icuLocaleDatum = heap_getattr(tup, Anum_pg_database_daticulocale, tupdesc,
&isNull); &isNull);
if (!isNull) if (!isNull)
{ {
@ -864,7 +852,6 @@ GetDatabaseCollation(Oid dbOid)
{ {
info.datcollversion = TextDatumGetCString(collverDatum); info.datcollversion = TextDatumGetCString(collverDatum);
} }
#endif
#if PG_VERSION_NUM >= PG_VERSION_16 #if PG_VERSION_NUM >= PG_VERSION_16
Datum icurulesDatum = heap_getattr(tup, Anum_pg_database_daticurules, tupdesc, Datum icurulesDatum = heap_getattr(tup, Anum_pg_database_daticurules, tupdesc,
@ -882,8 +869,6 @@ GetDatabaseCollation(Oid dbOid)
} }
#if PG_VERSION_NUM >= PG_VERSION_15
/* /*
* GetLocaleProviderString gets the datlocprovider stored in pg_database * GetLocaleProviderString gets the datlocprovider stored in pg_database
* and returns the string representation of the datlocprovider * and returns the string representation of the datlocprovider
@ -912,9 +897,6 @@ GetLocaleProviderString(char datlocprovider)
} }
#endif
/* /*
* GenerateCreateDatabaseStatementFromPgDatabase gets the pg_database tuple and returns the * GenerateCreateDatabaseStatementFromPgDatabase gets the pg_database tuple and returns the
* CREATE DATABASE statement that can be used to create given database. * CREATE DATABASE statement that can be used to create given database.
@ -956,7 +938,6 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
appendStringInfo(&str, " ENCODING = %s", appendStringInfo(&str, " ENCODING = %s",
quote_literal_cstr(pg_encoding_to_char(databaseForm->encoding))); quote_literal_cstr(pg_encoding_to_char(databaseForm->encoding)));
#if PG_VERSION_NUM >= PG_VERSION_15
if (collInfo.datcollversion != NULL) if (collInfo.datcollversion != NULL)
{ {
appendStringInfo(&str, " COLLATION_VERSION = %s", appendStringInfo(&str, " COLLATION_VERSION = %s",
@ -972,7 +953,6 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
appendStringInfo(&str, " LOCALE_PROVIDER = %s", appendStringInfo(&str, " LOCALE_PROVIDER = %s",
quote_identifier(GetLocaleProviderString( quote_identifier(GetLocaleProviderString(
databaseForm->datlocprovider))); databaseForm->datlocprovider)));
#endif
#if PG_VERSION_NUM >= PG_VERSION_16 #if PG_VERSION_NUM >= PG_VERSION_16
if (collInfo.daticurules != NULL) if (collInfo.daticurules != NULL)

View File

@ -162,7 +162,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
} }
ObjectAddress *object = NULL; ObjectAddress *object = NULL;
foreach_ptr(object, objectsToBeCreated) foreach_declared_ptr(object, objectsToBeCreated)
{ {
List *dependencyCommands = GetDependencyCreateDDLCommands(object); List *dependencyCommands = GetDependencyCreateDDLCommands(object);
ddlCommands = list_concat(ddlCommands, dependencyCommands); ddlCommands = list_concat(ddlCommands, dependencyCommands);
@ -201,7 +201,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
*/ */
List *addressSortedDependencies = SortList(objectsWithCommands, List *addressSortedDependencies = SortList(objectsWithCommands,
ObjectAddressComparator); ObjectAddressComparator);
foreach_ptr(object, addressSortedDependencies) foreach_declared_ptr(object, addressSortedDependencies)
{ {
LockDatabaseObject(object->classId, object->objectId, LockDatabaseObject(object->classId, object->objectId,
object->objectSubId, ExclusiveLock); object->objectSubId, ExclusiveLock);
@ -240,7 +240,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
else else
{ {
WorkerNode *workerNode = NULL; WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, remoteNodeList) foreach_declared_ptr(workerNode, remoteNodeList)
{ {
const char *nodeName = workerNode->workerName; const char *nodeName = workerNode->workerName;
uint32 nodePort = workerNode->workerPort; uint32 nodePort = workerNode->workerPort;
@ -256,7 +256,7 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
* that objects have been created on remote nodes before marking them * that objects have been created on remote nodes before marking them
* distributed, so MarkObjectDistributed wouldn't fail. * distributed, so MarkObjectDistributed wouldn't fail.
*/ */
foreach_ptr(object, objectsWithCommands) foreach_declared_ptr(object, objectsWithCommands)
{ {
/* /*
* pg_dist_object entries must be propagated with the super user, since * pg_dist_object entries must be propagated with the super user, since
@ -279,7 +279,7 @@ void
EnsureAllObjectDependenciesExistOnAllNodes(const List *targets) EnsureAllObjectDependenciesExistOnAllNodes(const List *targets)
{ {
ObjectAddress *target = NULL; ObjectAddress *target = NULL;
foreach_ptr(target, targets) foreach_declared_ptr(target, targets)
{ {
EnsureDependenciesExistOnAllNodes(target); EnsureDependenciesExistOnAllNodes(target);
} }
@ -336,7 +336,7 @@ DeferErrorIfCircularDependencyExists(const ObjectAddress *objectAddress)
List *dependencies = GetAllDependenciesForObject(objectAddress); List *dependencies = GetAllDependenciesForObject(objectAddress);
ObjectAddress *dependency = NULL; ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies) foreach_declared_ptr(dependency, dependencies)
{ {
if (dependency->classId == objectAddress->classId && if (dependency->classId == objectAddress->classId &&
dependency->objectId == objectAddress->objectId && dependency->objectId == objectAddress->objectId &&
@ -424,7 +424,7 @@ GetDistributableDependenciesForObject(const ObjectAddress *target)
/* filter the ones that can be distributed */ /* filter the ones that can be distributed */
ObjectAddress *dependency = NULL; ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies) foreach_declared_ptr(dependency, dependencies)
{ {
/* /*
* TODO: maybe we can optimize the logic applied in below line. Actually we * TODO: maybe we can optimize the logic applied in below line. Actually we
@ -508,7 +508,7 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
INCLUDE_IDENTITY, INCLUDE_IDENTITY,
creatingShellTableOnRemoteNode); creatingShellTableOnRemoteNode);
TableDDLCommand *tableDDLCommand = NULL; TableDDLCommand *tableDDLCommand = NULL;
foreach_ptr(tableDDLCommand, tableDDLCommands) foreach_declared_ptr(tableDDLCommand, tableDDLCommands)
{ {
Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); Assert(CitusIsA(tableDDLCommand, TableDDLCommand));
commandList = lappend(commandList, GetTableDDLCommand( commandList = lappend(commandList, GetTableDDLCommand(
@ -683,7 +683,7 @@ GetAllDependencyCreateDDLCommands(const List *dependencies)
List *commands = NIL; List *commands = NIL;
ObjectAddress *dependency = NULL; ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies) foreach_declared_ptr(dependency, dependencies)
{ {
commands = list_concat(commands, GetDependencyCreateDDLCommands(dependency)); commands = list_concat(commands, GetDependencyCreateDDLCommands(dependency));
} }
@ -831,7 +831,7 @@ bool
ShouldPropagateAnyObject(List *addresses) ShouldPropagateAnyObject(List *addresses)
{ {
ObjectAddress *address = NULL; ObjectAddress *address = NULL;
foreach_ptr(address, addresses) foreach_declared_ptr(address, addresses)
{ {
if (ShouldPropagateObject(address)) if (ShouldPropagateObject(address))
{ {
@ -853,7 +853,7 @@ FilterObjectAddressListByPredicate(List *objectAddressList, AddressPredicate pre
List *result = NIL; List *result = NIL;
ObjectAddress *address = NULL; ObjectAddress *address = NULL;
foreach_ptr(address, objectAddressList) foreach_declared_ptr(address, objectAddressList)
{ {
if (predicate(address)) if (predicate(address))
{ {

View File

@ -399,10 +399,37 @@ static DistributeObjectOps Any_Rename = {
.markDistributed = false, .markDistributed = false,
}; };
static DistributeObjectOps Any_SecLabel = { static DistributeObjectOps Any_SecLabel = {
.deparse = DeparseSecLabelStmt, .deparse = NULL,
.qualify = NULL, .qualify = NULL,
.preprocess = NULL, .preprocess = NULL,
.postprocess = PostprocessSecLabelStmt, .postprocess = PostprocessAnySecLabelStmt,
.operationType = DIST_OPS_ALTER,
.address = SecLabelStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps Role_SecLabel = {
.deparse = DeparseRoleSecLabelStmt,
.qualify = NULL,
.preprocess = NULL,
.postprocess = PostprocessRoleSecLabelStmt,
.operationType = DIST_OPS_ALTER,
.address = SecLabelStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps Table_SecLabel = {
.deparse = DeparseTableSecLabelStmt,
.qualify = NULL,
.preprocess = NULL,
.postprocess = PostprocessTableOrColumnSecLabelStmt,
.operationType = DIST_OPS_ALTER,
.address = SecLabelStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps Column_SecLabel = {
.deparse = DeparseColumnSecLabelStmt,
.qualify = NULL,
.preprocess = NULL,
.postprocess = PostprocessTableOrColumnSecLabelStmt,
.operationType = DIST_OPS_ALTER, .operationType = DIST_OPS_ALTER,
.address = SecLabelStmtObjectAddress, .address = SecLabelStmtObjectAddress,
.markDistributed = false, .markDistributed = false,
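The single Any_SecLabel entry is split into per-object-type ops. Purely for illustration, the kinds of statements each entry is meant to handle (the SQL below is an assumption based on the objtype dispatch added later in this diff):

/*
 * SECURITY LABEL FOR provider ON ROLE   some_role       IS 'label';   -> Role_SecLabel
 * SECURITY LABEL FOR provider ON TABLE  some_table      IS 'label';   -> Table_SecLabel
 * SECURITY LABEL FOR provider ON COLUMN some_table.col  IS 'label';   -> Column_SecLabel
 * any other object type                                               -> Any_SecLabel
 */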
@ -521,7 +548,6 @@ static DistributeObjectOps Database_Drop = {
.markDistributed = false, .markDistributed = false,
}; };
#if PG_VERSION_NUM >= PG_VERSION_15
static DistributeObjectOps Database_RefreshColl = { static DistributeObjectOps Database_RefreshColl = {
.deparse = DeparseAlterDatabaseRefreshCollStmt, .deparse = DeparseAlterDatabaseRefreshCollStmt,
.qualify = NULL, .qualify = NULL,
@ -532,7 +558,6 @@ static DistributeObjectOps Database_RefreshColl = {
.address = NULL, .address = NULL,
.markDistributed = false, .markDistributed = false,
}; };
#endif
static DistributeObjectOps Database_Set = { static DistributeObjectOps Database_Set = {
.deparse = DeparseAlterDatabaseSetStmt, .deparse = DeparseAlterDatabaseSetStmt,
@ -926,7 +951,6 @@ static DistributeObjectOps Sequence_AlterOwner = {
.address = AlterSequenceOwnerStmtObjectAddress, .address = AlterSequenceOwnerStmtObjectAddress,
.markDistributed = false, .markDistributed = false,
}; };
#if (PG_VERSION_NUM >= PG_VERSION_15)
static DistributeObjectOps Sequence_AlterPersistence = { static DistributeObjectOps Sequence_AlterPersistence = {
.deparse = DeparseAlterSequencePersistenceStmt, .deparse = DeparseAlterSequencePersistenceStmt,
.qualify = QualifyAlterSequencePersistenceStmt, .qualify = QualifyAlterSequencePersistenceStmt,
@ -936,7 +960,6 @@ static DistributeObjectOps Sequence_AlterPersistence = {
.address = AlterSequencePersistenceStmtObjectAddress, .address = AlterSequencePersistenceStmtObjectAddress,
.markDistributed = false, .markDistributed = false,
}; };
#endif
static DistributeObjectOps Sequence_Drop = { static DistributeObjectOps Sequence_Drop = {
.deparse = DeparseDropSequenceStmt, .deparse = DeparseDropSequenceStmt,
.qualify = QualifyDropSequenceStmt, .qualify = QualifyDropSequenceStmt,
@ -1393,7 +1416,7 @@ static DistributeObjectOps View_Rename = {
static DistributeObjectOps Trigger_Rename = { static DistributeObjectOps Trigger_Rename = {
.deparse = NULL, .deparse = NULL,
.qualify = NULL, .qualify = NULL,
.preprocess = PreprocessAlterTriggerRenameStmt, .preprocess = NULL,
.operationType = DIST_OPS_ALTER, .operationType = DIST_OPS_ALTER,
.postprocess = PostprocessAlterTriggerRenameStmt, .postprocess = PostprocessAlterTriggerRenameStmt,
.address = NULL, .address = NULL,
@ -1425,14 +1448,11 @@ GetDistributeObjectOps(Node *node)
return &Database_Drop; return &Database_Drop;
} }
#if PG_VERSION_NUM >= PG_VERSION_15
case T_AlterDatabaseRefreshCollStmt: case T_AlterDatabaseRefreshCollStmt:
{ {
return &Database_RefreshColl; return &Database_RefreshColl;
} }
#endif
case T_AlterDatabaseSetStmt: case T_AlterDatabaseSetStmt:
{ {
return &Database_Set; return &Database_Set;
@ -1723,7 +1743,6 @@ GetDistributeObjectOps(Node *node)
case OBJECT_SEQUENCE: case OBJECT_SEQUENCE:
{ {
#if (PG_VERSION_NUM >= PG_VERSION_15)
ListCell *cmdCell = NULL; ListCell *cmdCell = NULL;
foreach(cmdCell, stmt->cmds) foreach(cmdCell, stmt->cmds)
{ {
@ -1751,7 +1770,6 @@ GetDistributeObjectOps(Node *node)
} }
} }
} }
#endif
/* /*
* Prior to PG15, the only Alter Table statement * Prior to PG15, the only Alter Table statement
@ -2128,7 +2146,27 @@ GetDistributeObjectOps(Node *node)
case T_SecLabelStmt: case T_SecLabelStmt:
{ {
return &Any_SecLabel; SecLabelStmt *stmt = castNode(SecLabelStmt, node);
switch (stmt->objtype)
{
case OBJECT_ROLE:
{
return &Role_SecLabel;
}
case OBJECT_TABLE:
{
return &Table_SecLabel;
}
case OBJECT_COLUMN:
{
return &Column_SecLabel;
}
default:
return &Any_SecLabel;
}
} }
case T_RenameStmt: case T_RenameStmt:

View File

@ -210,7 +210,7 @@ MakeCollateClauseFromOid(Oid collationOid)
getObjectIdentityParts(&collateAddress, &objName, &objArgs, false); getObjectIdentityParts(&collateAddress, &objName, &objArgs, false);
char *name = NULL; char *name = NULL;
foreach_ptr(name, objName) foreach_declared_ptr(name, objName)
{ {
collateClause->collname = lappend(collateClause->collname, makeString(name)); collateClause->collname = lappend(collateClause->collname, makeString(name));
} }

View File

@ -274,7 +274,7 @@ PreprocessDropExtensionStmt(Node *node, const char *queryString,
/* unmark each distributed extension */ /* unmark each distributed extension */
ObjectAddress *address = NULL; ObjectAddress *address = NULL;
foreach_ptr(address, distributedExtensionAddresses) foreach_declared_ptr(address, distributedExtensionAddresses)
{ {
UnmarkObjectDistributed(address); UnmarkObjectDistributed(address);
} }
@ -313,7 +313,7 @@ FilterDistributedExtensions(List *extensionObjectList)
List *extensionNameList = NIL; List *extensionNameList = NIL;
String *objectName = NULL; String *objectName = NULL;
foreach_ptr(objectName, extensionObjectList) foreach_declared_ptr(objectName, extensionObjectList)
{ {
const char *extensionName = strVal(objectName); const char *extensionName = strVal(objectName);
const bool missingOk = true; const bool missingOk = true;
@ -351,7 +351,7 @@ ExtensionNameListToObjectAddressList(List *extensionObjectList)
List *extensionObjectAddressList = NIL; List *extensionObjectAddressList = NIL;
String *objectName; String *objectName;
foreach_ptr(objectName, extensionObjectList) foreach_declared_ptr(objectName, extensionObjectList)
{ {
/* /*
* We set missingOk to false as we assume all the objects in * We set missingOk to false as we assume all the objects in
@ -527,7 +527,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE);
Oid citusTableId = InvalidOid; Oid citusTableId = InvalidOid;
foreach_oid(citusTableId, citusTableIdList) foreach_declared_oid(citusTableId, citusTableIdList)
{ {
if (!ShouldMarkRelationDistributed(citusTableId)) if (!ShouldMarkRelationDistributed(citusTableId))
{ {
@ -571,7 +571,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
*/ */
List *viewList = GetAllViews(); List *viewList = GetAllViews();
Oid viewOid = InvalidOid; Oid viewOid = InvalidOid;
foreach_oid(viewOid, viewList) foreach_declared_oid(viewOid, viewList)
{ {
if (!ShouldMarkRelationDistributed(viewOid)) if (!ShouldMarkRelationDistributed(viewOid))
{ {
@ -605,7 +605,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
List *distributedObjectAddressList = GetDistributedObjectAddressList(); List *distributedObjectAddressList = GetDistributedObjectAddressList();
ObjectAddress *distributedObjectAddress = NULL; ObjectAddress *distributedObjectAddress = NULL;
foreach_ptr(distributedObjectAddress, distributedObjectAddressList) foreach_declared_ptr(distributedObjectAddress, distributedObjectAddressList)
{ {
List *distributableDependencyObjectAddresses = List *distributableDependencyObjectAddresses =
GetDistributableDependenciesForObject(distributedObjectAddress); GetDistributableDependenciesForObject(distributedObjectAddress);
@ -627,7 +627,7 @@ MarkExistingObjectDependenciesDistributedIfSupported()
SetLocalEnableMetadataSync(false); SetLocalEnableMetadataSync(false);
ObjectAddress *objectAddress = NULL; ObjectAddress *objectAddress = NULL;
foreach_ptr(objectAddress, uniqueObjectAddresses) foreach_declared_ptr(objectAddress, uniqueObjectAddresses)
{ {
MarkObjectDistributed(objectAddress); MarkObjectDistributed(objectAddress);
} }
@ -831,7 +831,7 @@ IsDropCitusExtensionStmt(Node *parseTree)
/* now that we have a DropStmt, check if citus extension is among the objects to be dropped */ /* now that we have a DropStmt, check if citus extension is among the objects to be dropped */
String *objectName; String *objectName;
foreach_ptr(objectName, dropStmt->objects) foreach_declared_ptr(objectName, dropStmt->objects)
{ {
const char *extensionName = strVal(objectName); const char *extensionName = strVal(objectName);
@ -1061,7 +1061,7 @@ GenerateGrantCommandsOnExtensionDependentFDWs(Oid extensionId)
List *FDWOids = GetDependentFDWsToExtension(extensionId); List *FDWOids = GetDependentFDWsToExtension(extensionId);
Oid FDWOid = InvalidOid; Oid FDWOid = InvalidOid;
foreach_oid(FDWOid, FDWOids) foreach_declared_oid(FDWOid, FDWOids)
{ {
Acl *aclEntry = GetPrivilegesForFDW(FDWOid); Acl *aclEntry = GetPrivilegesForFDW(FDWOid);

View File

@ -202,7 +202,7 @@ ErrorIfUnsupportedForeignConstraintExists(Relation relation, char referencingDis
List *foreignKeyOids = GetForeignKeyOids(referencingTableId, flags); List *foreignKeyOids = GetForeignKeyOids(referencingTableId, flags);
Oid foreignKeyOid = InvalidOid; Oid foreignKeyOid = InvalidOid;
foreach_oid(foreignKeyOid, foreignKeyOids) foreach_declared_oid(foreignKeyOid, foreignKeyOids)
{ {
HeapTuple heapTuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(foreignKeyOid)); HeapTuple heapTuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(foreignKeyOid));
@ -414,7 +414,7 @@ ForeignKeySetsNextValColumnToDefault(HeapTuple pgConstraintTuple)
List *setDefaultAttrs = ForeignKeyGetDefaultingAttrs(pgConstraintTuple); List *setDefaultAttrs = ForeignKeyGetDefaultingAttrs(pgConstraintTuple);
AttrNumber setDefaultAttr = InvalidAttrNumber; AttrNumber setDefaultAttr = InvalidAttrNumber;
foreach_int(setDefaultAttr, setDefaultAttrs) foreach_declared_int(setDefaultAttr, setDefaultAttrs)
{ {
if (ColumnDefaultsToNextVal(pgConstraintForm->conrelid, setDefaultAttr)) if (ColumnDefaultsToNextVal(pgConstraintForm->conrelid, setDefaultAttr))
{ {
@ -467,7 +467,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
} }
List *onDeleteSetDefColumnList = NIL; List *onDeleteSetDefColumnList = NIL;
#if PG_VERSION_NUM >= PG_VERSION_15
Datum onDeleteSetDefColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple, Datum onDeleteSetDefColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
Anum_pg_constraint_confdelsetcols, Anum_pg_constraint_confdelsetcols,
&isNull); &isNull);
@ -482,7 +481,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
onDeleteSetDefColumnList = onDeleteSetDefColumnList =
IntegerArrayTypeToList(DatumGetArrayTypeP(onDeleteSetDefColumnsDatum)); IntegerArrayTypeToList(DatumGetArrayTypeP(onDeleteSetDefColumnsDatum));
} }
#endif
if (list_length(onDeleteSetDefColumnList) == 0) if (list_length(onDeleteSetDefColumnList) == 0)
{ {
@ -727,7 +725,7 @@ ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid relationId)
GetForeignKeyIdsForColumn(columnName, relationId, searchForeignKeyColumnFlags); GetForeignKeyIdsForColumn(columnName, relationId, searchForeignKeyColumnFlags);
Oid foreignKeyId = InvalidOid; Oid foreignKeyId = InvalidOid;
foreach_oid(foreignKeyId, foreignKeyIdsColumnAppeared) foreach_declared_oid(foreignKeyId, foreignKeyIdsColumnAppeared)
{ {
Oid referencedTableId = GetReferencedTableId(foreignKeyId); Oid referencedTableId = GetReferencedTableId(foreignKeyId);
if (IsCitusTableType(referencedTableId, REFERENCE_TABLE)) if (IsCitusTableType(referencedTableId, REFERENCE_TABLE))
@ -901,7 +899,7 @@ GetForeignConstraintCommandsInternal(Oid relationId, int flags)
int saveNestLevel = PushEmptySearchPath(); int saveNestLevel = PushEmptySearchPath();
Oid foreignKeyOid = InvalidOid; Oid foreignKeyOid = InvalidOid;
foreach_oid(foreignKeyOid, foreignKeyOids) foreach_declared_oid(foreignKeyOid, foreignKeyOids)
{ {
char *statementDef = pg_get_constraintdef_command(foreignKeyOid); char *statementDef = pg_get_constraintdef_command(foreignKeyOid);
@ -1157,7 +1155,7 @@ static Oid
FindForeignKeyOidWithName(List *foreignKeyOids, const char *inputConstraintName) FindForeignKeyOidWithName(List *foreignKeyOids, const char *inputConstraintName)
{ {
Oid foreignKeyOid = InvalidOid; Oid foreignKeyOid = InvalidOid;
foreach_oid(foreignKeyOid, foreignKeyOids) foreach_declared_oid(foreignKeyOid, foreignKeyOids)
{ {
char *constraintName = get_constraint_name(foreignKeyOid); char *constraintName = get_constraint_name(foreignKeyOid);
@ -1472,7 +1470,7 @@ RelationInvolvedInAnyNonInheritedForeignKeys(Oid relationId)
List *foreignKeysRelationInvolved = list_concat(referencingForeignKeys, List *foreignKeysRelationInvolved = list_concat(referencingForeignKeys,
referencedForeignKeys); referencedForeignKeys);
Oid foreignKeyId = InvalidOid; Oid foreignKeyId = InvalidOid;
foreach_oid(foreignKeyId, foreignKeysRelationInvolved) foreach_declared_oid(foreignKeyId, foreignKeysRelationInvolved)
{ {
HeapTuple heapTuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(foreignKeyId)); HeapTuple heapTuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(foreignKeyId));
if (!HeapTupleIsValid(heapTuple)) if (!HeapTupleIsValid(heapTuple))

View File

@ -86,7 +86,7 @@ static bool
NameListHasFDWOwnedByDistributedExtension(List *FDWNames) NameListHasFDWOwnedByDistributedExtension(List *FDWNames)
{ {
String *FDWValue = NULL; String *FDWValue = NULL;
foreach_ptr(FDWValue, FDWNames) foreach_declared_ptr(FDWValue, FDWNames)
{ {
/* captures the extension address during lookup */ /* captures the extension address during lookup */
ObjectAddress *extensionAddress = palloc0(sizeof(ObjectAddress)); ObjectAddress *extensionAddress = palloc0(sizeof(ObjectAddress));

View File

@ -229,7 +229,7 @@ RecreateForeignServerStmt(Oid serverId)
int location = -1; int location = -1;
DefElem *option = NULL; DefElem *option = NULL;
foreach_ptr(option, server->options) foreach_declared_ptr(option, server->options)
{ {
DefElem *copyOption = makeDefElem(option->defname, option->arg, location); DefElem *copyOption = makeDefElem(option->defname, option->arg, location);
createStmt->options = lappend(createStmt->options, copyOption); createStmt->options = lappend(createStmt->options, copyOption);
@ -247,7 +247,7 @@ static bool
NameListHasDistributedServer(List *serverNames) NameListHasDistributedServer(List *serverNames)
{ {
String *serverValue = NULL; String *serverValue = NULL;
foreach_ptr(serverValue, serverNames) foreach_declared_ptr(serverValue, serverNames)
{ {
List *addresses = GetObjectAddressByServerName(strVal(serverValue), false); List *addresses = GetObjectAddressByServerName(strVal(serverValue), false);

View File

@ -256,7 +256,7 @@ create_distributed_function(PG_FUNCTION_ARGS)
createFunctionSQL, alterFunctionOwnerSQL); createFunctionSQL, alterFunctionOwnerSQL);
List *grantDDLCommands = GrantOnFunctionDDLCommands(funcOid); List *grantDDLCommands = GrantOnFunctionDDLCommands(funcOid);
char *grantOnFunctionSQL = NULL; char *grantOnFunctionSQL = NULL;
foreach_ptr(grantOnFunctionSQL, grantDDLCommands) foreach_declared_ptr(grantOnFunctionSQL, grantDDLCommands)
{ {
appendStringInfo(&ddlCommand, ";%s", grantOnFunctionSQL); appendStringInfo(&ddlCommand, ";%s", grantOnFunctionSQL);
} }
@ -370,7 +370,7 @@ ErrorIfAnyNodeDoesNotHaveMetadata(void)
ActivePrimaryNonCoordinatorNodeList(ShareLock); ActivePrimaryNonCoordinatorNodeList(ShareLock);
WorkerNode *workerNode = NULL; WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList) foreach_declared_ptr(workerNode, workerNodeList)
{ {
if (!workerNode->hasMetadata) if (!workerNode->hasMetadata)
{ {
@ -1476,7 +1476,7 @@ CreateFunctionStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
objectWithArgs->objname = stmt->funcname; objectWithArgs->objname = stmt->funcname;
FunctionParameter *funcParam = NULL; FunctionParameter *funcParam = NULL;
foreach_ptr(funcParam, stmt->parameters) foreach_declared_ptr(funcParam, stmt->parameters)
{ {
if (ShouldAddFunctionSignature(funcParam->mode)) if (ShouldAddFunctionSignature(funcParam->mode))
{ {
@ -1519,7 +1519,7 @@ DefineAggregateStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess
if (stmt->args != NIL) if (stmt->args != NIL)
{ {
FunctionParameter *funcParam = NULL; FunctionParameter *funcParam = NULL;
foreach_ptr(funcParam, linitial(stmt->args)) foreach_declared_ptr(funcParam, linitial(stmt->args))
{ {
objectWithArgs->objargs = lappend(objectWithArgs->objargs, objectWithArgs->objargs = lappend(objectWithArgs->objargs,
funcParam->argType); funcParam->argType);
@ -1528,7 +1528,7 @@ DefineAggregateStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess
else else
{ {
DefElem *defItem = NULL; DefElem *defItem = NULL;
foreach_ptr(defItem, stmt->definition) foreach_declared_ptr(defItem, stmt->definition)
{ {
/* /*
* If no explicit args are given, pg includes basetype in the signature. * If no explicit args are given, pg includes basetype in the signature.
@ -1933,7 +1933,7 @@ static void
ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt) ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt)
{ {
DefElem *action = NULL; DefElem *action = NULL;
foreach_ptr(action, stmt->actions) foreach_declared_ptr(action, stmt->actions)
{ {
if (strcmp(action->defname, "set") == 0) if (strcmp(action->defname, "set") == 0)
{ {
@ -2040,7 +2040,7 @@ PreprocessGrantOnFunctionStmt(Node *node, const char *queryString,
List *grantFunctionList = NIL; List *grantFunctionList = NIL;
ObjectAddress *functionAddress = NULL; ObjectAddress *functionAddress = NULL;
foreach_ptr(functionAddress, distributedFunctions) foreach_declared_ptr(functionAddress, distributedFunctions)
{ {
ObjectWithArgs *distFunction = ObjectWithArgsFromOid( ObjectWithArgs *distFunction = ObjectWithArgsFromOid(
functionAddress->objectId); functionAddress->objectId);
@ -2083,7 +2083,7 @@ PostprocessGrantOnFunctionStmt(Node *node, const char *queryString)
} }
ObjectAddress *functionAddress = NULL; ObjectAddress *functionAddress = NULL;
foreach_ptr(functionAddress, distributedFunctions) foreach_declared_ptr(functionAddress, distributedFunctions)
{ {
EnsureAllObjectDependenciesExistOnAllNodes(list_make1(functionAddress)); EnsureAllObjectDependenciesExistOnAllNodes(list_make1(functionAddress));
} }
@ -2120,7 +2120,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt)
/* iterate over all namespace names provided to get their OIDs */ /* iterate over all namespace names provided to get their OIDs */
String *namespaceValue = NULL; String *namespaceValue = NULL;
foreach_ptr(namespaceValue, grantStmt->objects) foreach_declared_ptr(namespaceValue, grantStmt->objects)
{ {
char *nspname = strVal(namespaceValue); char *nspname = strVal(namespaceValue);
bool missing_ok = false; bool missing_ok = false;
@ -2132,7 +2132,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt)
* iterate over all distributed functions to filter the ones * iterate over all distributed functions to filter the ones
* that belong to one of the namespaces from above * that belong to one of the namespaces from above
*/ */
foreach_ptr(distributedFunction, distributedFunctionList) foreach_declared_ptr(distributedFunction, distributedFunctionList)
{ {
Oid namespaceOid = get_func_namespace(distributedFunction->objectId); Oid namespaceOid = get_func_namespace(distributedFunction->objectId);
@ -2151,7 +2151,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt)
{ {
bool missingOk = false; bool missingOk = false;
ObjectWithArgs *objectWithArgs = NULL; ObjectWithArgs *objectWithArgs = NULL;
foreach_ptr(objectWithArgs, grantStmt->objects) foreach_declared_ptr(objectWithArgs, grantStmt->objects)
{ {
ObjectAddress *functionAddress = palloc0(sizeof(ObjectAddress)); ObjectAddress *functionAddress = palloc0(sizeof(ObjectAddress));
functionAddress->classId = ProcedureRelationId; functionAddress->classId = ProcedureRelationId;

View File

@ -17,6 +17,7 @@
#include "distributed/citus_ruleutils.h" #include "distributed/citus_ruleutils.h"
#include "distributed/commands.h" #include "distributed/commands.h"
#include "distributed/commands/utility_hook.h" #include "distributed/commands/utility_hook.h"
#include "distributed/deparser.h"
#include "distributed/metadata/distobject.h" #include "distributed/metadata/distobject.h"
#include "distributed/metadata_cache.h" #include "distributed/metadata_cache.h"
#include "distributed/version_compat.h" #include "distributed/version_compat.h"
@ -32,7 +33,6 @@ static List * CollectGrantTableIdList(GrantStmt *grantStmt);
* needed during the worker node portion of DDL execution before returning the * needed during the worker node portion of DDL execution before returning the
* DDLJobs in a List. If no distributed table is involved, this returns NIL. * DDLJobs in a List. If no distributed table is involved, this returns NIL.
* *
* NB: So far column level privileges are not supported.
*/ */
List * List *
PreprocessGrantStmt(Node *node, const char *queryString, PreprocessGrantStmt(Node *node, const char *queryString,
@ -70,9 +70,12 @@ PreprocessGrantStmt(Node *node, const char *queryString,
return NIL; return NIL;
} }
EnsureCoordinator();
/* deparse the privileges */ /* deparse the privileges */
if (grantStmt->privileges == NIL) if (grantStmt->privileges == NIL)
{ {
/* this is used for table level only */
appendStringInfo(&privsString, "ALL"); appendStringInfo(&privsString, "ALL");
} }
else else
@ -88,18 +91,44 @@ PreprocessGrantStmt(Node *node, const char *queryString,
{ {
appendStringInfoString(&privsString, ", "); appendStringInfoString(&privsString, ", ");
} }
if (priv->priv_name)
{
appendStringInfo(&privsString, "%s", priv->priv_name);
}
/*
* ALL can only appear on its own.
* The parser does not put ALL into priv_name, but because column(s)
* are specified, grantStmt->privileges is still populated, so we
* handle this special case here (see the if condition above).
*/
else if (isFirst)
{
/* this is used for column level only */
appendStringInfo(&privsString, "ALL");
}
/*
* Instead of relying only on the syntax check done by Postgres and
* adding an assert here, add a default ERROR if ALL is not first
* and no priv_name is defined.
*/
else
{
ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Cannot parse GRANT/REVOKE privileges")));
}
isFirst = false; isFirst = false;
if (priv->cols != NIL) if (priv->cols != NIL)
{ {
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), StringInfoData colsString;
errmsg("grant/revoke on column list is currently " initStringInfo(&colsString);
"unsupported")));
AppendColumnNameList(&colsString, priv->cols);
appendStringInfo(&privsString, "%s", colsString.data);
} }
Assert(priv->priv_name != NULL);
appendStringInfo(&privsString, "%s", priv->priv_name);
} }
} }
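To make the new column-level deparsing concrete, a rough example of how privsString is assembled (identifiers are made up, and the exact spacing emitted by AppendColumnNameList is an assumption):

/*
 * GRANT SELECT (col_a, col_b) ON dist_table TO app_role;
 *     -> priv_name = "SELECT", cols = (col_a, col_b)
 *     -> privsString becomes roughly: SELECT (col_a, col_b)
 *
 * GRANT ALL (col_a) ON dist_table TO app_role;
 *     -> priv_name is NULL but privileges is non-NIL, so the isFirst
 *        branch emits ALL and the column list is appended:
 *        privsString becomes roughly: ALL (col_a)
 */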
@ -153,6 +182,15 @@ PreprocessGrantStmt(Node *node, const char *queryString,
appendStringInfo(&ddlString, "REVOKE %s%s ON %s FROM %s", appendStringInfo(&ddlString, "REVOKE %s%s ON %s FROM %s",
grantOption, privsString.data, targetString.data, grantOption, privsString.data, targetString.data,
granteesString.data); granteesString.data);
if (grantStmt->behavior == DROP_CASCADE)
{
appendStringInfoString(&ddlString, " CASCADE");
}
else
{
appendStringInfoString(&ddlString, " RESTRICT");
}
} }
DDLJob *ddlJob = palloc0(sizeof(DDLJob)); DDLJob *ddlJob = palloc0(sizeof(DDLJob));

View File

@ -337,7 +337,7 @@ ExecuteFunctionOnEachTableIndex(Oid relationId, PGIndexProcessor pgIndexProcesso
List *indexIdList = RelationGetIndexList(relation); List *indexIdList = RelationGetIndexList(relation);
Oid indexId = InvalidOid; Oid indexId = InvalidOid;
foreach_oid(indexId, indexIdList) foreach_declared_oid(indexId, indexIdList)
{ {
HeapTuple indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexId)); HeapTuple indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexId));
if (!HeapTupleIsValid(indexTuple)) if (!HeapTupleIsValid(indexTuple))
@ -708,7 +708,7 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand,
/* check if any of the indexes being dropped belong to a distributed table */ /* check if any of the indexes being dropped belong to a distributed table */
List *objectNameList = NULL; List *objectNameList = NULL;
foreach_ptr(objectNameList, dropIndexStatement->objects) foreach_declared_ptr(objectNameList, dropIndexStatement->objects)
{ {
struct DropRelationCallbackState state; struct DropRelationCallbackState state;
uint32 rvrFlags = RVR_MISSING_OK; uint32 rvrFlags = RVR_MISSING_OK;
@ -880,7 +880,7 @@ ErrorIfUnsupportedAlterIndexStmt(AlterTableStmt *alterTableStatement)
/* error out if any of the subcommands are unsupported */ /* error out if any of the subcommands are unsupported */
List *commandList = alterTableStatement->cmds; List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, commandList) foreach_declared_ptr(command, commandList)
{ {
AlterTableType alterTableType = command->subtype; AlterTableType alterTableType = command->subtype;
@ -932,7 +932,7 @@ CreateIndexTaskList(IndexStmt *indexStmt)
LockShardListMetadata(shardIntervalList, ShareLock); LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL; ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList) foreach_declared_ptr(shardInterval, shardIntervalList)
{ {
uint64 shardId = shardInterval->shardId; uint64 shardId = shardInterval->shardId;
@ -977,7 +977,7 @@ CreateReindexTaskList(Oid relationId, ReindexStmt *reindexStmt)
LockShardListMetadata(shardIntervalList, ShareLock); LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL; ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList) foreach_declared_ptr(shardInterval, shardIntervalList)
{ {
uint64 shardId = shardInterval->shardId; uint64 shardId = shardInterval->shardId;
@ -1115,6 +1115,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, Oid relId, Oid oldRelI
char relkind; char relkind;
struct ReindexIndexCallbackState *state = arg; struct ReindexIndexCallbackState *state = arg;
LOCKMODE table_lockmode; LOCKMODE table_lockmode;
Oid table_oid;
/* /*
* Lock level here should match table lock in reindex_index() for * Lock level here should match table lock in reindex_index() for
@ -1152,13 +1153,24 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, Oid relId, Oid oldRelI
errmsg("\"%s\" is not an index", relation->relname))); errmsg("\"%s\" is not an index", relation->relname)));
/* Check permissions */ /* Check permissions */
#if PG_VERSION_NUM >= PG_VERSION_17
table_oid = IndexGetRelation(relId, true);
if (OidIsValid(table_oid))
{
AclResult aclresult = pg_class_aclcheck(table_oid, GetUserId(), ACL_MAINTAIN);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_INDEX, relation->relname);
}
#else
if (!object_ownercheck(RelationRelationId, relId, GetUserId())) if (!object_ownercheck(RelationRelationId, relId, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, relation->relname); aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, relation->relname);
#endif
/* Lock heap before index to avoid deadlock. */ /* Lock heap before index to avoid deadlock. */
if (relId != oldRelId) if (relId != oldRelId)
{ {
Oid table_oid = IndexGetRelation(relId, true); table_oid = IndexGetRelation(relId, true);
/* /*
* If the OID isn't valid, it means the index was concurrently * If the OID isn't valid, it means the index was concurrently
@ -1226,7 +1238,7 @@ ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement)
Var *partitionKey = DistPartitionKeyOrError(relationId); Var *partitionKey = DistPartitionKeyOrError(relationId);
List *indexParameterList = createIndexStatement->indexParams; List *indexParameterList = createIndexStatement->indexParams;
IndexElem *indexElement = NULL; IndexElem *indexElement = NULL;
foreach_ptr(indexElement, indexParameterList) foreach_declared_ptr(indexElement, indexParameterList)
{ {
const char *columnName = indexElement->name; const char *columnName = indexElement->name;
@ -1295,7 +1307,7 @@ DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt)
LockShardListMetadata(shardIntervalList, ShareLock); LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL; ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList) foreach_declared_ptr(shardInterval, shardIntervalList)
{ {
uint64 shardId = shardInterval->shardId; uint64 shardId = shardInterval->shardId;
char *shardIndexName = pstrdup(indexName); char *shardIndexName = pstrdup(indexName);

View File

@ -106,6 +106,7 @@
#include "distributed/resource_lock.h" #include "distributed/resource_lock.h"
#include "distributed/shard_pruning.h" #include "distributed/shard_pruning.h"
#include "distributed/shared_connection_stats.h" #include "distributed/shared_connection_stats.h"
#include "distributed/stats/stat_counters.h"
#include "distributed/transmit.h" #include "distributed/transmit.h"
#include "distributed/version_compat.h" #include "distributed/version_compat.h"
#include "distributed/worker_protocol.h" #include "distributed/worker_protocol.h"
@ -301,6 +302,7 @@ static SelectStmt * CitusCopySelect(CopyStmt *copyStatement);
static void CitusCopyTo(CopyStmt *copyStatement, QueryCompletion *completionTag); static void CitusCopyTo(CopyStmt *copyStatement, QueryCompletion *completionTag);
static int64 ForwardCopyDataFromConnection(CopyOutState copyOutState, static int64 ForwardCopyDataFromConnection(CopyOutState copyOutState,
MultiConnection *connection); MultiConnection *connection);
static void ErrorIfCopyHasOnErrorLogVerbosity(CopyStmt *copyStatement);
/* Private functions copied and adapted from copy.c in PostgreSQL */ /* Private functions copied and adapted from copy.c in PostgreSQL */
static void SendCopyBegin(CopyOutState cstate); static void SendCopyBegin(CopyOutState cstate);
@ -346,6 +348,7 @@ static LocalCopyStatus GetLocalCopyStatus(void);
static bool ShardIntervalListHasLocalPlacements(List *shardIntervalList); static bool ShardIntervalListHasLocalPlacements(List *shardIntervalList);
static void LogLocalCopyToRelationExecution(uint64 shardId); static void LogLocalCopyToRelationExecution(uint64 shardId);
static void LogLocalCopyToFileExecution(uint64 shardId); static void LogLocalCopyToFileExecution(uint64 shardId);
static void ErrorIfMergeInCopy(CopyStmt *copyStatement);
/* exports for SQL callable functions */ /* exports for SQL callable functions */
@ -497,10 +500,14 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletion *completionTag)
/* set up the destination for the COPY */ /* set up the destination for the COPY */
const bool publishableData = true; const bool publishableData = true;
/* we want to track query counters for "COPY (to) distributed-table .." commands */
const bool trackQueryCounters = true;
CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(tableId, columnNameList, CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(tableId, columnNameList,
partitionColumnIndex, partitionColumnIndex,
executorState, NULL, executorState, NULL,
publishableData); publishableData,
trackQueryCounters);
/* if the user specified an explicit append-to_shard option, write to it */ /* if the user specified an explicit append-to_shard option, write to it */
uint64 appendShardId = ProcessAppendToShardOption(tableId, copyStatement); uint64 appendShardId = ProcessAppendToShardOption(tableId, copyStatement);
@ -1875,11 +1882,15 @@ CopyFlushOutput(CopyOutState cstate, char *start, char *pointer)
* of intermediate results that are co-located with the actual table. * The names of the intermediate results will be of the form:
* The names of the intermediate results will be of the form: * intermediateResultIdPrefix_<shardid>
* intermediateResultIdPrefix_<shardid> * intermediateResultIdPrefix_<shardid>
*
* If trackQueryCounters is true, the COPY will increment the query stat
* counters as needed at the end of the COPY.
*/ */
CitusCopyDestReceiver * CitusCopyDestReceiver *
CreateCitusCopyDestReceiver(Oid tableId, List *columnNameList, int partitionColumnIndex, CreateCitusCopyDestReceiver(Oid tableId, List *columnNameList, int partitionColumnIndex,
EState *executorState, EState *executorState,
char *intermediateResultIdPrefix, bool isPublishable) char *intermediateResultIdPrefix, bool isPublishable,
bool trackQueryCounters)
{ {
CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) palloc0( CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) palloc0(
sizeof(CitusCopyDestReceiver)); sizeof(CitusCopyDestReceiver));
@ -1899,6 +1910,7 @@ CreateCitusCopyDestReceiver(Oid tableId, List *columnNameList, int partitionColu
copyDest->colocatedIntermediateResultIdPrefix = intermediateResultIdPrefix; copyDest->colocatedIntermediateResultIdPrefix = intermediateResultIdPrefix;
copyDest->memoryContext = CurrentMemoryContext; copyDest->memoryContext = CurrentMemoryContext;
copyDest->isPublishable = isPublishable; copyDest->isPublishable = isPublishable;
copyDest->trackQueryCounters = trackQueryCounters;
return copyDest; return copyDest;
} }
@ -1957,7 +1969,7 @@ ShardIntervalListHasLocalPlacements(List *shardIntervalList)
{ {
int32 localGroupId = GetLocalGroupId(); int32 localGroupId = GetLocalGroupId();
ShardInterval *shardInterval = NULL; ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList) foreach_declared_ptr(shardInterval, shardIntervalList)
{ {
if (ActiveShardPlacementOnGroup(localGroupId, shardInterval->shardId) != NULL) if (ActiveShardPlacementOnGroup(localGroupId, shardInterval->shardId) != NULL)
{ {
@ -2452,7 +2464,7 @@ ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement)
bool appendToShardSet = false; bool appendToShardSet = false;
DefElem *defel = NULL; DefElem *defel = NULL;
foreach_ptr(defel, copyStatement->options) foreach_declared_ptr(defel, copyStatement->options)
{ {
if (strncmp(defel->defname, APPEND_TO_SHARD_OPTION, NAMEDATALEN) == 0) if (strncmp(defel->defname, APPEND_TO_SHARD_OPTION, NAMEDATALEN) == 0)
{ {
@ -2585,8 +2597,9 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu
/* /*
* CitusCopyDestReceiverShutdown implements the rShutdown interface of * CitusCopyDestReceiverShutdown implements the rShutdown interface of
* CitusCopyDestReceiver. It ends the COPY on all the open connections and closes * CitusCopyDestReceiver. It ends the COPY on all the open connections, closes
* the relation. * the relation and increments the query stat counters based on the shards
* copied into if requested.
*/ */
static void static void
CitusCopyDestReceiverShutdown(DestReceiver *destReceiver) CitusCopyDestReceiverShutdown(DestReceiver *destReceiver)
@ -2597,6 +2610,26 @@ CitusCopyDestReceiverShutdown(DestReceiver *destReceiver)
ListCell *connectionStateCell = NULL; ListCell *connectionStateCell = NULL;
Relation distributedRelation = copyDest->distributedRelation; Relation distributedRelation = copyDest->distributedRelation;
/*
* Increment the query stat counters based on the number of shards
* copied into, if requested.
*/
if (copyDest->trackQueryCounters)
{
int copiedShardCount =
copyDest->shardStateHash ?
hash_get_num_entries(copyDest->shardStateHash) :
0;
if (copiedShardCount <= 1)
{
IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD);
}
else
{
IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD);
}
}
List *connectionStateList = ConnectionStateList(connectionStateHash); List *connectionStateList = ConnectionStateList(connectionStateHash);
FinishLocalColocatedIntermediateFiles(copyDest); FinishLocalColocatedIntermediateFiles(copyDest);
@ -2823,6 +2856,70 @@ CopyStatementHasFormat(CopyStmt *copyStatement, char *formatName)
} }
/*
* ErrorIfCopyHasOnErrorLogVerbosity errors out if the COPY statement
* has the on_error or log_verbosity option specified.
*/
static void
ErrorIfCopyHasOnErrorLogVerbosity(CopyStmt *copyStatement)
{
#if PG_VERSION_NUM >= PG_VERSION_17
bool log_verbosity = false;
foreach_ptr(DefElem, option, copyStatement->options)
{
if (strcmp(option->defname, "on_error") == 0)
{
ereport(ERROR, (errmsg(
"Citus does not support COPY FROM with ON_ERROR option.")));
}
else if (strcmp(option->defname, "log_verbosity") == 0)
{
log_verbosity = true;
}
}
/*
* Given that log_verbosity is currently used in COPY FROM when the
* ON_ERROR option is set to ignore, it makes more sense to error out
* for the ON_ERROR option first. For this reason, we don't error out
* directly in the loop above.
* Relevant PG17 commit: https://github.com/postgres/postgres/commit/f5a227895
*/
if (log_verbosity)
{
ereport(ERROR, (errmsg(
"Citus does not support COPY FROM with LOG_VERBOSITY option.")));
}
#endif
}
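For context, examples of COPY statements this check is meant to reject (illustrative statements using PG17's option syntax):

/*
 * COPY dist_table FROM STDIN WITH (ON_ERROR ignore);
 * COPY dist_table FROM STDIN WITH (ON_ERROR ignore, LOG_VERBOSITY verbose);
 */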
/*
* ErrorIfMergeInCopy raises an error if MERGE is used as the source query
* of a COPY involving Citus tables, as we don't support this yet.
* Relevant PG17 commit: c649fa24a
*/
static void
ErrorIfMergeInCopy(CopyStmt *copyStatement)
{
#if PG_VERSION_NUM < 170000
return;
#else
if (!copyStatement->relation && (IsA(copyStatement->query, MergeStmt)))
{
/*
* This path is currently not reachable because MERGE in COPY can
* only work with a RETURNING clause, and a RETURNING check
* will error out sooner for Citus.
*/
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("MERGE with Citus tables "
"is not yet supported in COPY")));
}
#endif
}
/* /*
* ProcessCopyStmt handles Citus specific concerns for COPY like supporting * ProcessCopyStmt handles Citus specific concerns for COPY like supporting
* COPYing from distributed tables and preventing unsupported actions. The * COPYing from distributed tables and preventing unsupported actions. The
@ -2860,6 +2957,8 @@ ProcessCopyStmt(CopyStmt *copyStatement, QueryCompletion *completionTag, const
*/ */
if (copyStatement->relation != NULL) if (copyStatement->relation != NULL)
{ {
ErrorIfMergeInCopy(copyStatement);
bool isFrom = copyStatement->is_from; bool isFrom = copyStatement->is_from;
/* consider using RangeVarGetRelidExtended to check perms before locking */ /* consider using RangeVarGetRelidExtended to check perms before locking */
@ -2897,6 +2996,8 @@ ProcessCopyStmt(CopyStmt *copyStatement, QueryCompletion *completionTag, const
"Citus does not support COPY FROM with WHERE"))); "Citus does not support COPY FROM with WHERE")));
} }
ErrorIfCopyHasOnErrorLogVerbosity(copyStatement);
/* check permissions, we're bypassing postgres' normal checks */ /* check permissions, we're bypassing postgres' normal checks */
CheckCopyPermissions(copyStatement); CheckCopyPermissions(copyStatement);
CitusCopyFrom(copyStatement, completionTag); CitusCopyFrom(copyStatement, completionTag);
@ -2948,7 +3049,7 @@ CitusCopySelect(CopyStmt *copyStatement)
for (int i = 0; i < tupleDescriptor->natts; i++) for (int i = 0; i < tupleDescriptor->natts; i++)
{ {
Form_pg_attribute attr = &tupleDescriptor->attrs[i]; Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, i);
if (attr->attisdropped || if (attr->attisdropped ||
attr->attgenerated attr->attgenerated
@ -3071,6 +3172,15 @@ CitusCopyTo(CopyStmt *copyStatement, QueryCompletion *completionTag)
SendCopyEnd(copyOutState); SendCopyEnd(copyOutState);
if (list_length(shardIntervalList) <= 1)
{
IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD);
}
else
{
IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD);
}
table_close(distributedRelation, AccessShareLock); table_close(distributedRelation, AccessShareLock);
if (completionTag != NULL) if (completionTag != NULL)

View File

@ -255,7 +255,7 @@ static void
DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt) DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt)
{ {
RoleSpec *roleSpec = NULL; RoleSpec *roleSpec = NULL;
foreach_ptr(roleSpec, dropRoleStmt->roles) foreach_declared_ptr(roleSpec, dropRoleStmt->roles)
{ {
Oid roleOid = get_role_oid(roleSpec->rolename, Oid roleOid = get_role_oid(roleSpec->rolename,
dropRoleStmt->missing_ok); dropRoleStmt->missing_ok);

View File

@ -48,7 +48,7 @@ CreatePolicyCommands(Oid relationId)
List *policyList = GetPolicyListForRelation(relationId); List *policyList = GetPolicyListForRelation(relationId);
RowSecurityPolicy *policy; RowSecurityPolicy *policy;
foreach_ptr(policy, policyList) foreach_declared_ptr(policy, policyList)
{ {
char *createPolicyCommand = CreatePolicyCommandForPolicy(relationId, policy); char *createPolicyCommand = CreatePolicyCommandForPolicy(relationId, policy);
commands = lappend(commands, makeTableDDLCommandString(createPolicyCommand)); commands = lappend(commands, makeTableDDLCommandString(createPolicyCommand));
@ -88,7 +88,7 @@ GetPolicyListForRelation(Oid relationId)
List *policyList = NIL; List *policyList = NIL;
RowSecurityPolicy *policy; RowSecurityPolicy *policy;
foreach_ptr(policy, relation->rd_rsdesc->policies) foreach_declared_ptr(policy, relation->rd_rsdesc->policies)
{ {
policyList = lappend(policyList, policy); policyList = lappend(policyList, policy);
} }
@ -310,7 +310,7 @@ GetPolicyByName(Oid relationId, const char *policyName)
List *policyList = GetPolicyListForRelation(relationId); List *policyList = GetPolicyListForRelation(relationId);
RowSecurityPolicy *policy = NULL; RowSecurityPolicy *policy = NULL;
foreach_ptr(policy, policyList) foreach_declared_ptr(policy, policyList)
{ {
if (strncmp(policy->policy_name, policyName, NAMEDATALEN) == 0) if (strncmp(policy->policy_name, policyName, NAMEDATALEN) == 0)
{ {

View File

@ -33,11 +33,9 @@
static CreatePublicationStmt * BuildCreatePublicationStmt(Oid publicationId); static CreatePublicationStmt * BuildCreatePublicationStmt(Oid publicationId);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static PublicationObjSpec * BuildPublicationRelationObjSpec(Oid relationId, static PublicationObjSpec * BuildPublicationRelationObjSpec(Oid relationId,
Oid publicationId, Oid publicationId,
bool tableOnly); bool tableOnly);
#endif
static void AppendPublishOptionList(StringInfo str, List *strings); static void AppendPublishOptionList(StringInfo str, List *strings);
static char * AlterPublicationOwnerCommand(Oid publicationId); static char * AlterPublicationOwnerCommand(Oid publicationId);
static bool ShouldPropagateCreatePublication(CreatePublicationStmt *stmt); static bool ShouldPropagateCreatePublication(CreatePublicationStmt *stmt);
@ -154,11 +152,10 @@ BuildCreatePublicationStmt(Oid publicationId)
ReleaseSysCache(publicationTuple); ReleaseSysCache(publicationTuple);
#if (PG_VERSION_NUM >= PG_VERSION_15)
List *schemaIds = GetPublicationSchemas(publicationId); List *schemaIds = GetPublicationSchemas(publicationId);
Oid schemaId = InvalidOid; Oid schemaId = InvalidOid;
foreach_oid(schemaId, schemaIds) foreach_declared_oid(schemaId, schemaIds)
{ {
char *schemaName = get_namespace_name(schemaId); char *schemaName = get_namespace_name(schemaId);
@ -170,7 +167,6 @@ BuildCreatePublicationStmt(Oid publicationId)
createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject); createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
} }
#endif
List *relationIds = GetPublicationRelations(publicationId, List *relationIds = GetPublicationRelations(publicationId,
publicationForm->pubviaroot ? publicationForm->pubviaroot ?
@ -181,9 +177,8 @@ BuildCreatePublicationStmt(Oid publicationId)
/* mainly for consistent ordering in test output */ /* mainly for consistent ordering in test output */
relationIds = SortList(relationIds, CompareOids); relationIds = SortList(relationIds, CompareOids);
foreach_oid(relationId, relationIds) foreach_declared_oid(relationId, relationIds)
{ {
#if (PG_VERSION_NUM >= PG_VERSION_15)
bool tableOnly = false; bool tableOnly = false;
/* since postgres 15, tables can have a column list and filter */ /* since postgres 15, tables can have a column list and filter */
@ -191,15 +186,6 @@ BuildCreatePublicationStmt(Oid publicationId)
BuildPublicationRelationObjSpec(relationId, publicationId, tableOnly); BuildPublicationRelationObjSpec(relationId, publicationId, tableOnly);
createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject); createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
#else
/* before postgres 15, only full tables are supported */
char *schemaName = get_namespace_name(get_rel_namespace(relationId));
char *tableName = get_rel_name(relationId);
RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
createPubStmt->tables = lappend(createPubStmt->tables, rangeVar);
#endif
} }
/* WITH (publish_via_partition_root = true) option */ /* WITH (publish_via_partition_root = true) option */
@ -270,8 +256,6 @@ AppendPublishOptionList(StringInfo str, List *options)
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* BuildPublicationRelationObjSpec returns a PublicationObjSpec that * BuildPublicationRelationObjSpec returns a PublicationObjSpec that
* can be included in a CREATE or ALTER PUBLICATION statement. * can be included in a CREATE or ALTER PUBLICATION statement.
@ -351,9 +335,6 @@ BuildPublicationRelationObjSpec(Oid relationId, Oid publicationId,
} }
#endif
/* /*
* PreprocessAlterPublicationStmt handles ALTER PUBLICATION statements * PreprocessAlterPublicationStmt handles ALTER PUBLICATION statements
* in a way that is mostly similar to PreprocessAlterDistributedObjectStmt, * in a way that is mostly similar to PreprocessAlterDistributedObjectStmt,
@ -414,7 +395,7 @@ GetAlterPublicationDDLCommandsForTable(Oid relationId, bool isAdd)
List *publicationIds = GetRelationPublications(relationId); List *publicationIds = GetRelationPublications(relationId);
Oid publicationId = InvalidOid; Oid publicationId = InvalidOid;
foreach_oid(publicationId, publicationIds) foreach_declared_oid(publicationId, publicationIds)
{ {
char *command = GetAlterPublicationTableDDLCommand(publicationId, char *command = GetAlterPublicationTableDDLCommand(publicationId,
relationId, isAdd); relationId, isAdd);
@ -452,7 +433,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
ReleaseSysCache(pubTuple); ReleaseSysCache(pubTuple);
#if (PG_VERSION_NUM >= PG_VERSION_15)
bool tableOnly = !isAdd; bool tableOnly = !isAdd;
/* since postgres 15, tables can have a column list and filter */ /* since postgres 15, tables can have a column list and filter */
@ -461,16 +441,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
alterPubStmt->pubobjects = lappend(alterPubStmt->pubobjects, publicationObject); alterPubStmt->pubobjects = lappend(alterPubStmt->pubobjects, publicationObject);
alterPubStmt->action = isAdd ? AP_AddObjects : AP_DropObjects; alterPubStmt->action = isAdd ? AP_AddObjects : AP_DropObjects;
#else
/* before postgres 15, only full tables are supported */
char *schemaName = get_namespace_name(get_rel_namespace(relationId));
char *tableName = get_rel_name(relationId);
RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
alterPubStmt->tables = lappend(alterPubStmt->tables, rangeVar);
alterPubStmt->tableAction = isAdd ? DEFELEM_ADD : DEFELEM_DROP;
#endif
/* we take the WHERE clause from the catalog where it is already transformed */ /* we take the WHERE clause from the catalog where it is already transformed */
bool whereClauseNeedsTransform = false; bool whereClauseNeedsTransform = false;
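For reference, the PG15+ publication forms that the PublicationObjSpec-based code above can reproduce look roughly like this (identifiers made up):

/*
 * CREATE PUBLICATION dist_pub
 *     FOR TABLE dist_table (col_a, col_b) WHERE (col_a > 0),
 *         TABLES IN SCHEMA app_schema
 *     WITH (publish_via_partition_root = true);
 *
 * ALTER PUBLICATION dist_pub ADD TABLE other_table;
 */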

View File

@ -74,7 +74,9 @@ static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple,
TupleDesc DbRoleSettingDescription); TupleDesc DbRoleSettingDescription);
static char * GetDatabaseNameFromDbRoleSetting(HeapTuple tuple, static char * GetDatabaseNameFromDbRoleSetting(HeapTuple tuple,
TupleDesc DbRoleSettingDescription); TupleDesc DbRoleSettingDescription);
#if PG_VERSION_NUM < PG_VERSION_17
static Node * makeStringConst(char *str, int location); static Node * makeStringConst(char *str, int location);
#endif
static Node * makeIntConst(int val, int location); static Node * makeIntConst(int val, int location);
static Node * makeFloatConst(char *str, int location); static Node * makeFloatConst(char *str, int location);
static const char * WrapQueryInAlterRoleIfExistsCall(const char *query, RoleSpec *role); static const char * WrapQueryInAlterRoleIfExistsCall(const char *query, RoleSpec *role);
@ -163,7 +165,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)
AlterRoleStmt *stmt = castNode(AlterRoleStmt, node); AlterRoleStmt *stmt = castNode(AlterRoleStmt, node);
DefElem *option = NULL; DefElem *option = NULL;
foreach_ptr(option, stmt->options) foreach_declared_ptr(option, stmt->options)
{ {
if (strcasecmp(option->defname, "password") == 0) if (strcasecmp(option->defname, "password") == 0)
{ {
@ -564,7 +566,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
{ {
List *grantRoleStmts = GenerateGrantRoleStmtsOfRole(roleOid); List *grantRoleStmts = GenerateGrantRoleStmtsOfRole(roleOid);
Node *stmt = NULL; Node *stmt = NULL;
foreach_ptr(stmt, grantRoleStmts) foreach_declared_ptr(stmt, grantRoleStmts)
{ {
completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
} }
@ -578,7 +580,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid)
*/ */
List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename); List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename);
stmt = NULL; stmt = NULL;
foreach_ptr(stmt, secLabelOnRoleStmts) foreach_declared_ptr(stmt, secLabelOnRoleStmts)
{ {
completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt));
} }
@ -787,7 +789,7 @@ MakeSetStatementArguments(char *configurationName, char *configurationValue)
} }
char *configuration = NULL; char *configuration = NULL;
foreach_ptr(configuration, configurationList) foreach_declared_ptr(configuration, configurationList)
{ {
Node *arg = makeStringConst(configuration, -1); Node *arg = makeStringConst(configuration, -1);
args = lappend(args, arg); args = lappend(args, arg);
@ -823,7 +825,7 @@ GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options)
List *stmts = NIL; List *stmts = NIL;
DefElem *option = NULL; DefElem *option = NULL;
foreach_ptr(option, options) foreach_declared_ptr(option, options)
{ {
if (strcmp(option->defname, "adminmembers") != 0 && if (strcmp(option->defname, "adminmembers") != 0 &&
strcmp(option->defname, "rolemembers") != 0 && strcmp(option->defname, "rolemembers") != 0 &&
@ -1047,7 +1049,7 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString,
/* deparse all grant statements and add them to the commands list */ /* deparse all grant statements and add them to the commands list */
Node *stmt = NULL; Node *stmt = NULL;
foreach_ptr(stmt, grantRoleStmts) foreach_declared_ptr(stmt, grantRoleStmts)
{ {
commands = lappend(commands, DeparseTreeNode(stmt)); commands = lappend(commands, DeparseTreeNode(stmt));
} }
@ -1058,6 +1060,8 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString,
} }
#if PG_VERSION_NUM < PG_VERSION_17
/* /*
* makeStringConst creates a Const Node that stores a given string * makeStringConst creates a Const Node that stores a given string
* *
@ -1068,19 +1072,17 @@ makeStringConst(char *str, int location)
{ {
A_Const *n = makeNode(A_Const); A_Const *n = makeNode(A_Const);
#if PG_VERSION_NUM >= PG_VERSION_15
n->val.sval.type = T_String; n->val.sval.type = T_String;
n->val.sval.sval = str; n->val.sval.sval = str;
#else
n->val.type = T_String;
n->val.val.str = str;
#endif
n->location = location; n->location = location;
return (Node *) n; return (Node *) n;
} }
#endif
/* /*
* makeIntConst creates a Const Node that stores a given integer * makeIntConst creates a Const Node that stores a given integer
* *
@ -1091,13 +1093,8 @@ makeIntConst(int val, int location)
{ {
A_Const *n = makeNode(A_Const); A_Const *n = makeNode(A_Const);
#if PG_VERSION_NUM >= PG_VERSION_15
n->val.ival.type = T_Integer; n->val.ival.type = T_Integer;
n->val.ival.ival = val; n->val.ival.ival = val;
#else
n->val.type = T_Integer;
n->val.val.ival = val;
#endif
n->location = location; n->location = location;
return (Node *) n; return (Node *) n;
@ -1114,13 +1111,8 @@ makeFloatConst(char *str, int location)
{ {
A_Const *n = makeNode(A_Const); A_Const *n = makeNode(A_Const);
#if PG_VERSION_NUM >= PG_VERSION_15
n->val.fval.type = T_Float; n->val.fval.type = T_Float;
n->val.fval.fval = str; n->val.fval.fval = str;
#else
n->val.type = T_Float;
n->val.val.str = str;
#endif
n->location = location; n->location = location;
return (Node *) n; return (Node *) n;
@ -1174,7 +1166,7 @@ void
UnmarkRolesDistributed(List *roles) UnmarkRolesDistributed(List *roles)
{ {
Node *roleNode = NULL; Node *roleNode = NULL;
foreach_ptr(roleNode, roles) foreach_declared_ptr(roleNode, roles)
{ {
RoleSpec *role = castNode(RoleSpec, roleNode); RoleSpec *role = castNode(RoleSpec, roleNode);
ObjectAddress roleAddress = { 0 }; ObjectAddress roleAddress = { 0 };
@ -1204,7 +1196,7 @@ FilterDistributedRoles(List *roles)
{ {
List *distributedRoles = NIL; List *distributedRoles = NIL;
Node *roleNode = NULL; Node *roleNode = NULL;
foreach_ptr(roleNode, roles) foreach_declared_ptr(roleNode, roles)
{ {
RoleSpec *role = castNode(RoleSpec, roleNode); RoleSpec *role = castNode(RoleSpec, roleNode);
Oid roleOid = get_rolespec_oid(role, true); Oid roleOid = get_rolespec_oid(role, true);
@ -1282,7 +1274,7 @@ PostprocessGrantRoleStmt(Node *node, const char *queryString)
GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); GrantRoleStmt *stmt = castNode(GrantRoleStmt, node);
RoleSpec *role = NULL; RoleSpec *role = NULL;
foreach_ptr(role, stmt->grantee_roles) foreach_declared_ptr(role, stmt->grantee_roles)
{ {
Oid roleOid = get_rolespec_oid(role, false); Oid roleOid = get_rolespec_oid(role, false);
ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress)); ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress));
View File
@ -162,7 +162,7 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString,
EnsureSequentialMode(OBJECT_SCHEMA); EnsureSequentialMode(OBJECT_SCHEMA);
String *schemaVal = NULL; String *schemaVal = NULL;
foreach_ptr(schemaVal, distributedSchemas) foreach_declared_ptr(schemaVal, distributedSchemas)
{ {
if (SchemaHasDistributedTableWithFKey(strVal(schemaVal))) if (SchemaHasDistributedTableWithFKey(strVal(schemaVal)))
{ {
@ -322,7 +322,7 @@ FilterDistributedSchemas(List *schemas)
List *distributedSchemas = NIL; List *distributedSchemas = NIL;
String *schemaValue = NULL; String *schemaValue = NULL;
foreach_ptr(schemaValue, schemas) foreach_declared_ptr(schemaValue, schemas)
{ {
const char *schemaName = strVal(schemaValue); const char *schemaName = strVal(schemaValue);
Oid schemaOid = get_namespace_oid(schemaName, true); Oid schemaOid = get_namespace_oid(schemaName, true);
@ -443,7 +443,7 @@ GetGrantCommandsFromCreateSchemaStmt(Node *node)
CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node); CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node);
Node *element = NULL; Node *element = NULL;
foreach_ptr(element, stmt->schemaElts) foreach_declared_ptr(element, stmt->schemaElts)
{ {
if (!IsA(element, GrantStmt)) if (!IsA(element, GrantStmt))
{ {
@ -480,7 +480,7 @@ static bool
CreateSchemaStmtCreatesTable(CreateSchemaStmt *stmt) CreateSchemaStmtCreatesTable(CreateSchemaStmt *stmt)
{ {
Node *element = NULL; Node *element = NULL;
foreach_ptr(element, stmt->schemaElts) foreach_declared_ptr(element, stmt->schemaElts)
{ {
/* /*
* CREATE TABLE AS and CREATE FOREIGN TABLE commands cannot be * CREATE TABLE AS and CREATE FOREIGN TABLE commands cannot be
View File
@ -174,7 +174,7 @@ EnsureTableKindSupportedForTenantSchema(Oid relationId)
List *partitionList = PartitionList(relationId); List *partitionList = PartitionList(relationId);
Oid partitionRelationId = InvalidOid; Oid partitionRelationId = InvalidOid;
foreach_oid(partitionRelationId, partitionList) foreach_declared_oid(partitionRelationId, partitionList)
{ {
ErrorIfIllegalPartitioningInTenantSchema(relationId, partitionRelationId); ErrorIfIllegalPartitioningInTenantSchema(relationId, partitionRelationId);
} }
@ -199,7 +199,7 @@ EnsureFKeysForTenantTable(Oid relationId)
int fKeyReferencingFlags = INCLUDE_REFERENCING_CONSTRAINTS | INCLUDE_ALL_TABLE_TYPES; int fKeyReferencingFlags = INCLUDE_REFERENCING_CONSTRAINTS | INCLUDE_ALL_TABLE_TYPES;
List *referencingForeignKeys = GetForeignKeyOids(relationId, fKeyReferencingFlags); List *referencingForeignKeys = GetForeignKeyOids(relationId, fKeyReferencingFlags);
Oid foreignKeyId = InvalidOid; Oid foreignKeyId = InvalidOid;
foreach_oid(foreignKeyId, referencingForeignKeys) foreach_declared_oid(foreignKeyId, referencingForeignKeys)
{ {
Oid referencingTableId = GetReferencingTableId(foreignKeyId); Oid referencingTableId = GetReferencingTableId(foreignKeyId);
Oid referencedTableId = GetReferencedTableId(foreignKeyId); Oid referencedTableId = GetReferencedTableId(foreignKeyId);
@ -232,7 +232,7 @@ EnsureFKeysForTenantTable(Oid relationId)
int fKeyReferencedFlags = INCLUDE_REFERENCED_CONSTRAINTS | INCLUDE_ALL_TABLE_TYPES; int fKeyReferencedFlags = INCLUDE_REFERENCED_CONSTRAINTS | INCLUDE_ALL_TABLE_TYPES;
List *referencedForeignKeys = GetForeignKeyOids(relationId, fKeyReferencedFlags); List *referencedForeignKeys = GetForeignKeyOids(relationId, fKeyReferencedFlags);
foreach_oid(foreignKeyId, referencedForeignKeys) foreach_declared_oid(foreignKeyId, referencedForeignKeys)
{ {
Oid referencingTableId = GetReferencingTableId(foreignKeyId); Oid referencingTableId = GetReferencingTableId(foreignKeyId);
Oid referencedTableId = GetReferencedTableId(foreignKeyId); Oid referencedTableId = GetReferencedTableId(foreignKeyId);
@ -429,7 +429,7 @@ EnsureSchemaCanBeDistributed(Oid schemaId, List *schemaTableIdList)
} }
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, schemaTableIdList) foreach_declared_oid(relationId, schemaTableIdList)
{ {
EnsureTenantTable(relationId, "citus_schema_distribute"); EnsureTenantTable(relationId, "citus_schema_distribute");
} }
@ -637,7 +637,7 @@ citus_schema_distribute(PG_FUNCTION_ARGS)
List *tableIdListInSchema = SchemaGetNonShardTableIdList(schemaId); List *tableIdListInSchema = SchemaGetNonShardTableIdList(schemaId);
List *tableIdListToConvert = NIL; List *tableIdListToConvert = NIL;
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, tableIdListInSchema) foreach_declared_oid(relationId, tableIdListInSchema)
{ {
/* prevent concurrent drop of the relation */ /* prevent concurrent drop of the relation */
LockRelationOid(relationId, AccessShareLock); LockRelationOid(relationId, AccessShareLock);
@ -675,7 +675,7 @@ citus_schema_distribute(PG_FUNCTION_ARGS)
* tables. * tables.
*/ */
List *originalForeignKeyRecreationCommands = NIL; List *originalForeignKeyRecreationCommands = NIL;
foreach_oid(relationId, tableIdListToConvert) foreach_declared_oid(relationId, tableIdListToConvert)
{ {
List *fkeyCommandsForRelation = List *fkeyCommandsForRelation =
GetFKeyCreationCommandsRelationInvolvedWithTableType(relationId, GetFKeyCreationCommandsRelationInvolvedWithTableType(relationId,
@ -741,7 +741,7 @@ citus_schema_undistribute(PG_FUNCTION_ARGS)
List *tableIdListInSchema = SchemaGetNonShardTableIdList(schemaId); List *tableIdListInSchema = SchemaGetNonShardTableIdList(schemaId);
List *tableIdListToConvert = NIL; List *tableIdListToConvert = NIL;
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, tableIdListInSchema) foreach_declared_oid(relationId, tableIdListInSchema)
{ {
/* prevent concurrent drop of the relation */ /* prevent concurrent drop of the relation */
LockRelationOid(relationId, AccessShareLock); LockRelationOid(relationId, AccessShareLock);
@ -883,7 +883,7 @@ TenantSchemaPickAnchorShardId(Oid schemaId)
} }
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, tablesInSchema) foreach_declared_oid(relationId, tablesInSchema)
{ {
/* /*
* Make sure the relation isn't dropped for the remainder of * Make sure the relation isn't dropped for the remainder of
View File
@ -15,19 +15,18 @@
#include "distributed/commands/utility_hook.h" #include "distributed/commands/utility_hook.h"
#include "distributed/coordinator_protocol.h" #include "distributed/coordinator_protocol.h"
#include "distributed/deparser.h" #include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/log_utils.h" #include "distributed/log_utils.h"
#include "distributed/metadata/distobject.h" #include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h" #include "distributed/metadata_sync.h"
/* /*
* PostprocessSecLabelStmt prepares the commands that need to be run on all workers to assign * PostprocessRoleSecLabelStmt prepares the commands that need to be run on all workers to assign
* security labels on distributed objects, currently supporting just Role objects. * security labels on distributed roles. It also ensures that all object dependencies exist on all
* It also ensures that all object dependencies exist on all * nodes for the role in the SecLabelStmt.
* nodes for the object in the SecLabelStmt.
*/ */
List * List *
PostprocessSecLabelStmt(Node *node, const char *queryString) PostprocessRoleSecLabelStmt(Node *node, const char *queryString)
{ {
if (!EnableAlterRolePropagation || !ShouldPropagate()) if (!EnableAlterRolePropagation || !ShouldPropagate())
{ {
@ -42,34 +41,91 @@ PostprocessSecLabelStmt(Node *node, const char *queryString)
return NIL; return NIL;
} }
if (secLabelStmt->objtype != OBJECT_ROLE) EnsurePropagationToCoordinator();
EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);
const char *secLabelCommands = DeparseTreeNode((Node *) secLabelStmt);
List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
(void *) secLabelCommands,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(REMOTE_NODES, commandList);
}
/*
* PostprocessTableOrColumnSecLabelStmt prepares the commands that need to be run on all
* workers to assign security labels on distributed tables or the columns of a distributed
* table. It also ensures that all object dependencies exist on all nodes for the table in
* the SecLabelStmt.
*/
List *
PostprocessTableOrColumnSecLabelStmt(Node *node, const char *queryString)
{
if (!EnableAlterRolePropagation || !ShouldPropagate())
{ {
/*
* If we are not in the coordinator, we don't want to interrupt the security
* label command with notices, the user expects that from the worker node
* the command will not be propagated
*/
if (EnableUnsupportedFeatureMessages && IsCoordinator())
{
ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands whose "
"object type is not role"),
errhint("Connect to worker nodes directly to manually "
"run the same SECURITY LABEL command.")));
}
return NIL; return NIL;
} }
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
List *objectAddresses = GetObjectAddressListFromParseTree(node, false, true);
if (!IsAnyParentObjectDistributed(objectAddresses))
{
return NIL;
}
EnsurePropagationToCoordinator(); EnsurePropagationToCoordinator();
EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses); EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);
const char *secLabelCommands = DeparseTreeNode((Node *) secLabelStmt); const char *secLabelCommands = DeparseTreeNode((Node *) secLabelStmt);
List *commandList = list_make3(DISABLE_DDL_PROPAGATION, List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
(void *) secLabelCommands, (void *) secLabelCommands,
ENABLE_DDL_PROPAGATION); ENABLE_DDL_PROPAGATION);
List *DDLJobs = NodeDDLTaskList(REMOTE_NODES, commandList);
ListCell *lc = NULL;
return NodeDDLTaskList(REMOTE_NODES, commandList); /*
* The label is for a table or a column, so we need to set the targetObjectAddress
* of the DDLJob to the relationId of the table. This is needed to ensure that
* the search path is correctly set for the remote security label command; it
* needs to be able to resolve the table that the label is being defined on.
*/
Assert(list_length(objectAddresses) == 1);
ObjectAddress *target = linitial(objectAddresses);
Oid relationId = target->objectId;
Assert(relationId != InvalidOid);
foreach(lc, DDLJobs)
{
DDLJob *ddlJob = (DDLJob *) lfirst(lc);
ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId);
}
return DDLJobs;
}
/*
* PostprocessAnySecLabelStmt is used for any other object types
* that are not supported by Citus. It issues a notice to the client
* if appropriate. It is effectively a no-op.
*/
List *
PostprocessAnySecLabelStmt(Node *node, const char *queryString)
{
/*
* If we are not on the coordinator, we don't want to interrupt the security
* label command with notices; the user on a worker node expects that the
* command will not be propagated.
*/
if (EnableUnsupportedFeatureMessages && IsCoordinator())
{
ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands whose "
"object type is not role or table or column"),
errhint("Connect to worker nodes directly to manually "
"run the same SECURITY LABEL command.")));
}
return NIL;
} }
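The hunk above splits security label postprocessing into three handlers but does not show how one of them gets picked for a given statement; in Citus that wiring normally lives in the DistributeObjectOps tables. A purely hypothetical dispatch sketch, using only the handlers and fields visible in this hunk:

/* hypothetical dispatcher, not part of the diff; the real wiring is elsewhere */
static List *
PostprocessSecLabelStmtSketch(Node *node, const char *queryString)
{
    SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);

    switch (secLabelStmt->objtype)
    {
        case OBJECT_ROLE:
        {
            return PostprocessRoleSecLabelStmt(node, queryString);
        }

        case OBJECT_TABLE:
        case OBJECT_COLUMN:
        {
            return PostprocessTableOrColumnSecLabelStmt(node, queryString);
        }

        default:
        {
            return PostprocessAnySecLabelStmt(node, queryString);
        }
    }
}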
View File
@ -123,7 +123,7 @@ static bool
OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId) OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId)
{ {
DefElem *defElem = NULL; DefElem *defElem = NULL;
foreach_ptr(defElem, optionList) foreach_declared_ptr(defElem, optionList)
{ {
if (strcmp(defElem->defname, "owned_by") == 0) if (strcmp(defElem->defname, "owned_by") == 0)
{ {
@ -202,7 +202,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList,
} }
Oid ownedSequenceId = InvalidOid; Oid ownedSequenceId = InvalidOid;
foreach_oid(ownedSequenceId, columnOwnedSequences) foreach_declared_oid(ownedSequenceId, columnOwnedSequences)
{ {
/* /*
* A column might have multiple sequences one via OWNED BY one another * A column might have multiple sequences one via OWNED BY one another
@ -288,7 +288,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString,
*/ */
List *deletingSequencesList = stmt->objects; List *deletingSequencesList = stmt->objects;
List *objectNameList = NULL; List *objectNameList = NULL;
foreach_ptr(objectNameList, deletingSequencesList) foreach_declared_ptr(objectNameList, deletingSequencesList)
{ {
RangeVar *seq = makeRangeVarFromNameList(objectNameList); RangeVar *seq = makeRangeVarFromNameList(objectNameList);
@ -322,7 +322,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString,
/* remove the entries for the distributed objects on dropping */ /* remove the entries for the distributed objects on dropping */
ObjectAddress *address = NULL; ObjectAddress *address = NULL;
foreach_ptr(address, distributedSequenceAddresses) foreach_declared_ptr(address, distributedSequenceAddresses)
{ {
UnmarkObjectDistributed(address); UnmarkObjectDistributed(address);
} }
@ -356,7 +356,7 @@ SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess)
List *droppingSequencesList = dropSeqStmt->objects; List *droppingSequencesList = dropSeqStmt->objects;
List *objectNameList = NULL; List *objectNameList = NULL;
foreach_ptr(objectNameList, droppingSequencesList) foreach_declared_ptr(objectNameList, droppingSequencesList)
{ {
RangeVar *seq = makeRangeVarFromNameList(objectNameList); RangeVar *seq = makeRangeVarFromNameList(objectNameList);
@ -476,7 +476,7 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString,
{ {
List *options = stmt->options; List *options = stmt->options;
DefElem *defel = NULL; DefElem *defel = NULL;
foreach_ptr(defel, options) foreach_declared_ptr(defel, options)
{ {
if (strcmp(defel->defname, "as") == 0) if (strcmp(defel->defname, "as") == 0)
{ {
@ -511,7 +511,7 @@ SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress, char depTyp
Oid relationId; Oid relationId;
List *relations = GetDependentRelationsWithSequence(sequenceAddress->objectId, List *relations = GetDependentRelationsWithSequence(sequenceAddress->objectId,
depType); depType);
foreach_oid(relationId, relations) foreach_declared_oid(relationId, relations)
{ {
if (IsCitusTable(relationId)) if (IsCitusTable(relationId))
{ {
@ -735,8 +735,6 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString)
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* PreprocessAlterSequencePersistenceStmt is called for change of persistence * PreprocessAlterSequencePersistenceStmt is called for change of persistence
* of sequences before the persistence is changed on the local instance. * of sequences before the persistence is changed on the local instance.
@ -847,9 +845,6 @@ PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
} }
#endif
/* /*
* PreprocessGrantOnSequenceStmt is executed before the statement is applied to the local * PreprocessGrantOnSequenceStmt is executed before the statement is applied to the local
* postgres instance. * postgres instance.
@ -930,7 +925,7 @@ PostprocessGrantOnSequenceStmt(Node *node, const char *queryString)
EnsureCoordinator(); EnsureCoordinator();
RangeVar *sequence = NULL; RangeVar *sequence = NULL;
foreach_ptr(sequence, distributedSequences) foreach_declared_ptr(sequence, distributedSequences)
{ {
ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress));
Oid sequenceOid = RangeVarGetRelid(sequence, NoLock, false); Oid sequenceOid = RangeVarGetRelid(sequence, NoLock, false);
@ -1014,7 +1009,7 @@ FilterDistributedSequences(GrantStmt *stmt)
/* iterate over all namespace names provided to get their OIDs */ /* iterate over all namespace names provided to get their OIDs */
List *namespaceOidList = NIL; List *namespaceOidList = NIL;
String *namespaceValue = NULL; String *namespaceValue = NULL;
foreach_ptr(namespaceValue, stmt->objects) foreach_declared_ptr(namespaceValue, stmt->objects)
{ {
char *nspname = strVal(namespaceValue); char *nspname = strVal(namespaceValue);
bool missing_ok = false; bool missing_ok = false;
@ -1028,7 +1023,7 @@ FilterDistributedSequences(GrantStmt *stmt)
*/ */
List *distributedSequenceList = DistributedSequenceList(); List *distributedSequenceList = DistributedSequenceList();
ObjectAddress *sequenceAddress = NULL; ObjectAddress *sequenceAddress = NULL;
foreach_ptr(sequenceAddress, distributedSequenceList) foreach_declared_ptr(sequenceAddress, distributedSequenceList)
{ {
Oid namespaceOid = get_rel_namespace(sequenceAddress->objectId); Oid namespaceOid = get_rel_namespace(sequenceAddress->objectId);
@ -1052,7 +1047,7 @@ FilterDistributedSequences(GrantStmt *stmt)
{ {
bool missing_ok = false; bool missing_ok = false;
RangeVar *sequenceRangeVar = NULL; RangeVar *sequenceRangeVar = NULL;
foreach_ptr(sequenceRangeVar, stmt->objects) foreach_declared_ptr(sequenceRangeVar, stmt->objects)
{ {
Oid sequenceOid = RangeVarGetRelid(sequenceRangeVar, NoLock, missing_ok); Oid sequenceOid = RangeVarGetRelid(sequenceRangeVar, NoLock, missing_ok);
ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress));
View File
@ -26,9 +26,9 @@
#include "distributed/adaptive_executor.h" #include "distributed/adaptive_executor.h"
#include "distributed/argutils.h" #include "distributed/argutils.h"
#include "distributed/commands/serialize_distributed_ddls.h"
#include "distributed/deparse_shard_query.h" #include "distributed/deparse_shard_query.h"
#include "distributed/resource_lock.h" #include "distributed/resource_lock.h"
#include "distributed/serialize_distributed_ddls.h"
PG_FUNCTION_INFO_V1(citus_internal_acquire_citus_advisory_object_class_lock); PG_FUNCTION_INFO_V1(citus_internal_acquire_citus_advisory_object_class_lock);
View File
@ -184,7 +184,7 @@ PreprocessDropStatisticsStmt(Node *node, const char *queryString,
List *ddlJobs = NIL; List *ddlJobs = NIL;
List *processedStatsOids = NIL; List *processedStatsOids = NIL;
List *objectNameList = NULL; List *objectNameList = NULL;
foreach_ptr(objectNameList, dropStatisticsStmt->objects) foreach_declared_ptr(objectNameList, dropStatisticsStmt->objects)
{ {
Oid statsOid = get_statistics_object_oid(objectNameList, Oid statsOid = get_statistics_object_oid(objectNameList,
dropStatisticsStmt->missing_ok); dropStatisticsStmt->missing_ok);
@ -234,7 +234,7 @@ DropStatisticsObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
List *objectAddresses = NIL; List *objectAddresses = NIL;
List *objectNameList = NULL; List *objectNameList = NULL;
foreach_ptr(objectNameList, dropStatisticsStmt->objects) foreach_declared_ptr(objectNameList, dropStatisticsStmt->objects)
{ {
Oid statsOid = get_statistics_object_oid(objectNameList, Oid statsOid = get_statistics_object_oid(objectNameList,
dropStatisticsStmt->missing_ok); dropStatisticsStmt->missing_ok);
@ -535,7 +535,7 @@ GetExplicitStatisticsCommandList(Oid relationId)
int saveNestLevel = PushEmptySearchPath(); int saveNestLevel = PushEmptySearchPath();
Oid statisticsId = InvalidOid; Oid statisticsId = InvalidOid;
foreach_oid(statisticsId, statisticsIdList) foreach_declared_oid(statisticsId, statisticsIdList)
{ {
/* we need create commands for already created stats before distribution */ /* we need create commands for already created stats before distribution */
Datum commandText = DirectFunctionCall1(pg_get_statisticsobjdef, Datum commandText = DirectFunctionCall1(pg_get_statisticsobjdef,
@ -606,7 +606,7 @@ GetExplicitStatisticsSchemaIdList(Oid relationId)
RelationClose(relation); RelationClose(relation);
Oid statsId = InvalidOid; Oid statsId = InvalidOid;
foreach_oid(statsId, statsIdList) foreach_declared_oid(statsId, statsIdList)
{ {
HeapTuple heapTuple = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statsId)); HeapTuple heapTuple = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statsId));
if (!HeapTupleIsValid(heapTuple)) if (!HeapTupleIsValid(heapTuple))
@ -651,14 +651,15 @@ GetAlterIndexStatisticsCommands(Oid indexOid)
} }
Form_pg_attribute targetAttr = (Form_pg_attribute) GETSTRUCT(attTuple); Form_pg_attribute targetAttr = (Form_pg_attribute) GETSTRUCT(attTuple);
if (targetAttr->attstattarget != DEFAULT_STATISTICS_TARGET) int32 targetAttstattarget = getAttstattarget_compat(attTuple);
if (targetAttstattarget != DEFAULT_STATISTICS_TARGET)
{ {
char *indexNameWithSchema = generate_qualified_relation_name(indexOid); char *indexNameWithSchema = generate_qualified_relation_name(indexOid);
char *command = char *command =
GenerateAlterIndexColumnSetStatsCommand(indexNameWithSchema, GenerateAlterIndexColumnSetStatsCommand(indexNameWithSchema,
targetAttr->attnum, targetAttr->attnum,
targetAttr->attstattarget); targetAttstattarget);
alterIndexStatisticsCommandList = alterIndexStatisticsCommandList =
lappend(alterIndexStatisticsCommandList, lappend(alterIndexStatisticsCommandList,
@ -773,9 +774,10 @@ CreateAlterCommandIfTargetNotDefault(Oid statsOid)
} }
Form_pg_statistic_ext statisticsForm = (Form_pg_statistic_ext) GETSTRUCT(tup); Form_pg_statistic_ext statisticsForm = (Form_pg_statistic_ext) GETSTRUCT(tup);
int16 currentStxstattarget = getStxstattarget_compat(tup);
ReleaseSysCache(tup); ReleaseSysCache(tup);
if (statisticsForm->stxstattarget == -1) if (currentStxstattarget == -1)
{ {
return NULL; return NULL;
} }
@ -785,7 +787,8 @@ CreateAlterCommandIfTargetNotDefault(Oid statsOid)
char *schemaName = get_namespace_name(statisticsForm->stxnamespace); char *schemaName = get_namespace_name(statisticsForm->stxnamespace);
char *statName = NameStr(statisticsForm->stxname); char *statName = NameStr(statisticsForm->stxname);
alterStatsStmt->stxstattarget = statisticsForm->stxstattarget; alterStatsStmt->stxstattarget = getAlterStatsStxstattarget_compat(
currentStxstattarget);
alterStatsStmt->defnames = list_make2(makeString(schemaName), makeString(statName)); alterStatsStmt->defnames = list_make2(makeString(schemaName), makeString(statName));
return DeparseAlterStatisticsStmt((Node *) alterStatsStmt); return DeparseAlterStatisticsStmt((Node *) alterStatsStmt);
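The getAttstattarget_compat()/getStxstattarget_compat()/getAlterStatsStxstattarget_compat() helpers used above take the heap tuple (or an already-fetched value) instead of reading the field straight from the catalog Form struct, which suggests the statistics target is no longer part of the fixed-size portion of the catalog row on newer servers. A hypothetical sketch of such a compat accessor; the PG17 cutoff, the syscache lookup, and the -1 fallback are assumptions rather than facts taken from this diff:

/* hypothetical compat-accessor sketch, not the actual Citus macro */
static inline int32
getAttstattarget_compat_sketch(HeapTuple attTuple)
{
#if PG_VERSION_NUM >= PG_VERSION_17
    /* assumption: the column is nullable now, so read it through the tuple,
     * which is assumed to come from the pg_attribute (ATTNUM) syscache */
    bool isNull = false;
    Datum datum = SysCacheGetAttr(ATTNUM, attTuple,
                                  Anum_pg_attribute_attstattarget, &isNull);
    return isNull ? -1 : DatumGetInt16(datum);
#else
    /* older servers keep the target in the fixed part of pg_attribute */
    return ((Form_pg_attribute) GETSTRUCT(attTuple))->attstattarget;
#endif
}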
View File
@ -154,7 +154,7 @@ PreprocessDropTableStmt(Node *node, const char *queryString,
Assert(dropTableStatement->removeType == OBJECT_TABLE); Assert(dropTableStatement->removeType == OBJECT_TABLE);
List *tableNameList = NULL; List *tableNameList = NULL;
foreach_ptr(tableNameList, dropTableStatement->objects) foreach_declared_ptr(tableNameList, dropTableStatement->objects)
{ {
RangeVar *tableRangeVar = makeRangeVarFromNameList(tableNameList); RangeVar *tableRangeVar = makeRangeVarFromNameList(tableNameList);
bool missingOK = true; bool missingOK = true;
@ -202,7 +202,7 @@ PreprocessDropTableStmt(Node *node, const char *queryString,
SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);
Oid partitionRelationId = InvalidOid; Oid partitionRelationId = InvalidOid;
foreach_oid(partitionRelationId, partitionList) foreach_declared_oid(partitionRelationId, partitionList)
{ {
char *detachPartitionCommand = char *detachPartitionCommand =
GenerateDetachPartitionCommand(partitionRelationId); GenerateDetachPartitionCommand(partitionRelationId);
@ -263,7 +263,7 @@ PostprocessCreateTableStmt(CreateStmt *createStatement, const char *queryString)
} }
RangeVar *parentRelation = NULL; RangeVar *parentRelation = NULL;
foreach_ptr(parentRelation, createStatement->inhRelations) foreach_declared_ptr(parentRelation, createStatement->inhRelations)
{ {
Oid parentRelationId = RangeVarGetRelid(parentRelation, NoLock, Oid parentRelationId = RangeVarGetRelid(parentRelation, NoLock,
missingOk); missingOk);
@ -480,7 +480,7 @@ PreprocessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement,
{ {
List *commandList = alterTableStatement->cmds; List *commandList = alterTableStatement->cmds;
AlterTableCmd *alterTableCommand = NULL; AlterTableCmd *alterTableCommand = NULL;
foreach_ptr(alterTableCommand, commandList) foreach_declared_ptr(alterTableCommand, commandList)
{ {
if (alterTableCommand->subtype == AT_AttachPartition) if (alterTableCommand->subtype == AT_AttachPartition)
{ {
@ -792,7 +792,7 @@ ChooseForeignKeyConstraintNameAddition(List *columnNames)
String *columnNameString = NULL; String *columnNameString = NULL;
foreach_ptr(columnNameString, columnNames) foreach_declared_ptr(columnNameString, columnNames)
{ {
const char *name = strVal(columnNameString); const char *name = strVal(columnNameString);
@ -1153,7 +1153,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
{ {
AlterTableStmt *stmtCopy = copyObject(alterTableStatement); AlterTableStmt *stmtCopy = copyObject(alterTableStatement);
stmtCopy->objtype = OBJECT_SEQUENCE; stmtCopy->objtype = OBJECT_SEQUENCE;
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* it must be ALTER TABLE .. OWNER TO .. * it must be ALTER TABLE .. OWNER TO ..
@ -1163,16 +1162,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
*/ */
return PreprocessSequenceAlterTableStmt((Node *) stmtCopy, alterTableCommand, return PreprocessSequenceAlterTableStmt((Node *) stmtCopy, alterTableCommand,
processUtilityContext); processUtilityContext);
#else
/*
* it must be ALTER TABLE .. OWNER TO .. command
* since this is the only ALTER command of a sequence that
* passes through an AlterTableStmt
*/
return PreprocessAlterSequenceOwnerStmt((Node *) stmtCopy, alterTableCommand,
processUtilityContext);
#endif
} }
else if (relKind == RELKIND_VIEW) else if (relKind == RELKIND_VIEW)
{ {
@ -1314,7 +1303,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
AlterTableCmd *newCmd = makeNode(AlterTableCmd); AlterTableCmd *newCmd = makeNode(AlterTableCmd);
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, commandList) foreach_declared_ptr(command, commandList)
{ {
AlterTableType alterTableType = command->subtype; AlterTableType alterTableType = command->subtype;
@ -1418,7 +1407,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
List *columnConstraints = columnDefinition->constraints; List *columnConstraints = columnDefinition->constraints;
Constraint *constraint = NULL; Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints) foreach_declared_ptr(constraint, columnConstraints)
{ {
if (constraint->contype == CONSTR_FOREIGN) if (constraint->contype == CONSTR_FOREIGN)
{ {
@ -1442,7 +1431,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
deparseAT = true; deparseAT = true;
constraint = NULL; constraint = NULL;
foreach_ptr(constraint, columnConstraints) foreach_declared_ptr(constraint, columnConstraints)
{ {
if (ConstrTypeCitusCanDefaultName(constraint->contype)) if (ConstrTypeCitusCanDefaultName(constraint->contype))
{ {
@ -1467,7 +1456,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
*/ */
constraint = NULL; constraint = NULL;
int constraintIdx = 0; int constraintIdx = 0;
foreach_ptr(constraint, columnConstraints) foreach_declared_ptr(constraint, columnConstraints)
{ {
if (constraint->contype == CONSTR_DEFAULT) if (constraint->contype == CONSTR_DEFAULT)
{ {
@ -1696,7 +1685,7 @@ DeparserSupportsAlterTableAddColumn(AlterTableStmt *alterTableStatement,
{ {
ColumnDef *columnDefinition = (ColumnDef *) addColumnSubCommand->def; ColumnDef *columnDefinition = (ColumnDef *) addColumnSubCommand->def;
Constraint *constraint = NULL; Constraint *constraint = NULL;
foreach_ptr(constraint, columnDefinition->constraints) foreach_declared_ptr(constraint, columnDefinition->constraints)
{ {
if (constraint->contype == CONSTR_CHECK) if (constraint->contype == CONSTR_CHECK)
{ {
@ -1792,7 +1781,7 @@ static bool
RelationIdListContainsCitusTableType(List *relationIdList, CitusTableType citusTableType) RelationIdListContainsCitusTableType(List *relationIdList, CitusTableType citusTableType)
{ {
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
if (IsCitusTableType(relationId, citusTableType)) if (IsCitusTableType(relationId, citusTableType))
{ {
@ -1812,7 +1801,7 @@ static bool
RelationIdListContainsPostgresTable(List *relationIdList) RelationIdListContainsPostgresTable(List *relationIdList)
{ {
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
if (OidIsValid(relationId) && !IsCitusTable(relationId)) if (OidIsValid(relationId) && !IsCitusTable(relationId))
{ {
@ -1851,7 +1840,7 @@ ConvertPostgresLocalTablesToCitusLocalTables(AlterTableStmt *alterTableStatement
* change in below loop due to CreateCitusLocalTable. * change in below loop due to CreateCitusLocalTable.
*/ */
RangeVar *relationRangeVar; RangeVar *relationRangeVar;
foreach_ptr(relationRangeVar, relationRangeVarList) foreach_declared_ptr(relationRangeVar, relationRangeVarList)
{ {
List *commandList = alterTableStatement->cmds; List *commandList = alterTableStatement->cmds;
LOCKMODE lockMode = AlterTableGetLockLevel(commandList); LOCKMODE lockMode = AlterTableGetLockLevel(commandList);
@ -1979,7 +1968,7 @@ RangeVarListHasLocalRelationConvertedByUser(List *relationRangeVarList,
AlterTableStmt *alterTableStatement) AlterTableStmt *alterTableStatement)
{ {
RangeVar *relationRangeVar; RangeVar *relationRangeVar;
foreach_ptr(relationRangeVar, relationRangeVarList) foreach_declared_ptr(relationRangeVar, relationRangeVarList)
{ {
/* /*
* Here we iterate the relation list, and if at least one of the relations * Here we iterate the relation list, and if at least one of the relations
@ -2076,7 +2065,7 @@ GetAlterTableAddFKeyConstraintList(AlterTableStmt *alterTableStatement)
List *commandList = alterTableStatement->cmds; List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, commandList) foreach_declared_ptr(command, commandList)
{ {
List *commandForeignKeyConstraintList = List *commandForeignKeyConstraintList =
GetAlterTableCommandFKeyConstraintList(command); GetAlterTableCommandFKeyConstraintList(command);
@ -2116,7 +2105,7 @@ GetAlterTableCommandFKeyConstraintList(AlterTableCmd *command)
List *columnConstraints = columnDefinition->constraints; List *columnConstraints = columnDefinition->constraints;
Constraint *constraint = NULL; Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints) foreach_declared_ptr(constraint, columnConstraints)
{ {
if (constraint->contype == CONSTR_FOREIGN) if (constraint->contype == CONSTR_FOREIGN)
{ {
@ -2139,7 +2128,7 @@ GetRangeVarListFromFKeyConstraintList(List *fKeyConstraintList)
List *rightRelationRangeVarList = NIL; List *rightRelationRangeVarList = NIL;
Constraint *fKeyConstraint = NULL; Constraint *fKeyConstraint = NULL;
foreach_ptr(fKeyConstraint, fKeyConstraintList) foreach_declared_ptr(fKeyConstraint, fKeyConstraintList)
{ {
RangeVar *rightRelationRangeVar = fKeyConstraint->pktable; RangeVar *rightRelationRangeVar = fKeyConstraint->pktable;
rightRelationRangeVarList = lappend(rightRelationRangeVarList, rightRelationRangeVarList = lappend(rightRelationRangeVarList,
@ -2160,7 +2149,7 @@ GetRelationIdListFromRangeVarList(List *rangeVarList, LOCKMODE lockMode, bool mi
List *relationIdList = NIL; List *relationIdList = NIL;
RangeVar *rangeVar = NULL; RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, rangeVarList) foreach_declared_ptr(rangeVar, rangeVarList)
{ {
Oid rightRelationId = RangeVarGetRelid(rangeVar, lockMode, missingOk); Oid rightRelationId = RangeVarGetRelid(rangeVar, lockMode, missingOk);
relationIdList = lappend_oid(relationIdList, rightRelationId); relationIdList = lappend_oid(relationIdList, rightRelationId);
@ -2234,7 +2223,7 @@ AlterTableDropsForeignKey(AlterTableStmt *alterTableStatement)
Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode); Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode);
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, alterTableStatement->cmds) foreach_declared_ptr(command, alterTableStatement->cmds)
{ {
AlterTableType alterTableType = command->subtype; AlterTableType alterTableType = command->subtype;
@ -2296,7 +2285,7 @@ AnyForeignKeyDependsOnIndex(Oid indexId)
GetPgDependTuplesForDependingObjects(dependentObjectClassId, dependentObjectId); GetPgDependTuplesForDependingObjects(dependentObjectClassId, dependentObjectId);
HeapTuple dependencyTuple = NULL; HeapTuple dependencyTuple = NULL;
foreach_ptr(dependencyTuple, dependencyTupleList) foreach_declared_ptr(dependencyTuple, dependencyTupleList)
{ {
Form_pg_depend dependencyForm = (Form_pg_depend) GETSTRUCT(dependencyTuple); Form_pg_depend dependencyForm = (Form_pg_depend) GETSTRUCT(dependencyTuple);
Oid dependingClassId = dependencyForm->classid; Oid dependingClassId = dependencyForm->classid;
@ -2484,7 +2473,7 @@ SkipForeignKeyValidationIfConstraintIsFkey(AlterTableStmt *alterTableStatement,
* shards anyway. * shards anyway.
*/ */
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, alterTableStatement->cmds) foreach_declared_ptr(command, alterTableStatement->cmds)
{ {
AlterTableType alterTableType = command->subtype; AlterTableType alterTableType = command->subtype;
@ -2565,7 +2554,7 @@ ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement)
/* then check if any of subcommands drop partition column.*/ /* then check if any of subcommands drop partition column.*/
List *commandList = alterTableStatement->cmds; List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, commandList) foreach_declared_ptr(command, commandList)
{ {
AlterTableType alterTableType = command->subtype; AlterTableType alterTableType = command->subtype;
if (alterTableType == AT_DropColumn) if (alterTableType == AT_DropColumn)
@ -2634,7 +2623,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
List *commandList = alterTableStatement->cmds; List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, commandList) foreach_declared_ptr(command, commandList)
{ {
AlterTableType alterTableType = command->subtype; AlterTableType alterTableType = command->subtype;
@ -2670,7 +2659,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
} }
Constraint *constraint = NULL; Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints) foreach_declared_ptr(constraint, columnConstraints)
{ {
if (constraint->conname == NULL && if (constraint->conname == NULL &&
(constraint->contype == CONSTR_PRIMARY || (constraint->contype == CONSTR_PRIMARY ||
@ -2690,7 +2679,7 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
* that sequence is supported * that sequence is supported
*/ */
constraint = NULL; constraint = NULL;
foreach_ptr(constraint, columnConstraints) foreach_declared_ptr(constraint, columnConstraints)
{ {
if (constraint->contype == CONSTR_DEFAULT) if (constraint->contype == CONSTR_DEFAULT)
{ {
@ -2802,7 +2791,7 @@ FixAlterTableStmtIndexNames(AlterTableStmt *alterTableStatement)
List *commandList = alterTableStatement->cmds; List *commandList = alterTableStatement->cmds;
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, commandList) foreach_declared_ptr(command, commandList)
{ {
AlterTableType alterTableType = command->subtype; AlterTableType alterTableType = command->subtype;
@ -3165,7 +3154,7 @@ ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
List *indexOidList = RelationGetIndexList(relation); List *indexOidList = RelationGetIndexList(relation);
Oid indexOid = InvalidOid; Oid indexOid = InvalidOid;
foreach_oid(indexOid, indexOidList) foreach_declared_oid(indexOid, indexOidList)
{ {
Relation indexDesc = index_open(indexOid, RowExclusiveLock); Relation indexDesc = index_open(indexOid, RowExclusiveLock);
bool hasDistributionColumn = false; bool hasDistributionColumn = false;
@ -3310,7 +3299,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
/* error out if any of the subcommands are unsupported */ /* error out if any of the subcommands are unsupported */
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, commandList) foreach_declared_ptr(command, commandList)
{ {
AlterTableType alterTableType = command->subtype; AlterTableType alterTableType = command->subtype;
@ -3385,7 +3374,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
Constraint *columnConstraint = NULL; Constraint *columnConstraint = NULL;
foreach_ptr(columnConstraint, column->constraints) foreach_declared_ptr(columnConstraint, column->constraints)
{ {
if (columnConstraint->contype == CONSTR_IDENTITY) if (columnConstraint->contype == CONSTR_IDENTITY)
{ {
@ -3417,7 +3406,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
List *columnConstraints = column->constraints; List *columnConstraints = column->constraints;
Constraint *constraint = NULL; Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints) foreach_declared_ptr(constraint, columnConstraints)
{ {
if (constraint->contype == CONSTR_DEFAULT) if (constraint->contype == CONSTR_DEFAULT)
{ {
@ -3664,9 +3653,36 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
break; break;
} }
#if PG_VERSION_NUM >= PG_VERSION_15 #if PG_VERSION_NUM >= PG_VERSION_17
case AT_SetAccessMethod: case AT_SetExpression:
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg(
"ALTER TABLE ... ALTER COLUMN ... SET EXPRESSION commands "
"are currently unsupported.")));
break;
}
#endif #endif
case AT_SetAccessMethod:
{
/*
* If command->name == NULL, that means the user is trying to use
* ALTER TABLE ... SET ACCESS METHOD DEFAULT
* which we currently do not support.
*/
if (command->name == NULL)
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg(
"DEFAULT option in ALTER TABLE ... SET ACCESS METHOD "
"is currently unsupported."),
errhint(
"You can rerun the command by explicitly writing the access method name.")));
}
break;
}
case AT_SetNotNull: case AT_SetNotNull:
case AT_ReplicaIdentity: case AT_ReplicaIdentity:
case AT_ChangeOwner: case AT_ChangeOwner:
@ -3770,7 +3786,7 @@ SetupExecutionModeForAlterTable(Oid relationId, AlterTableCmd *command)
List *columnConstraints = columnDefinition->constraints; List *columnConstraints = columnDefinition->constraints;
Constraint *constraint = NULL; Constraint *constraint = NULL;
foreach_ptr(constraint, columnConstraints) foreach_declared_ptr(constraint, columnConstraints)
{ {
if (constraint->contype == CONSTR_FOREIGN) if (constraint->contype == CONSTR_FOREIGN)
{ {
@ -3970,10 +3986,10 @@ SetInterShardDDLTaskPlacementList(Task *task, ShardInterval *leftShardInterval,
List *intersectedPlacementList = NIL; List *intersectedPlacementList = NIL;
ShardPlacement *leftShardPlacement = NULL; ShardPlacement *leftShardPlacement = NULL;
foreach_ptr(leftShardPlacement, leftShardPlacementList) foreach_declared_ptr(leftShardPlacement, leftShardPlacementList)
{ {
ShardPlacement *rightShardPlacement = NULL; ShardPlacement *rightShardPlacement = NULL;
foreach_ptr(rightShardPlacement, rightShardPlacementList) foreach_declared_ptr(rightShardPlacement, rightShardPlacementList)
{ {
if (leftShardPlacement->nodeId == rightShardPlacement->nodeId) if (leftShardPlacement->nodeId == rightShardPlacement->nodeId)
{ {
View File
@ -57,9 +57,6 @@ static void ExtractDropStmtTriggerAndRelationName(DropStmt *dropTriggerStmt,
static void ErrorIfDropStmtDropsMultipleTriggers(DropStmt *dropTriggerStmt); static void ErrorIfDropStmtDropsMultipleTriggers(DropStmt *dropTriggerStmt);
static char * GetTriggerNameById(Oid triggerId); static char * GetTriggerNameById(Oid triggerId);
static int16 GetTriggerTypeById(Oid triggerId); static int16 GetTriggerTypeById(Oid triggerId);
#if (PG_VERSION_NUM < PG_VERSION_15)
static void ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname);
#endif
/* GUC that overrides trigger checks for distributed tables and reference tables */ /* GUC that overrides trigger checks for distributed tables and reference tables */
@ -81,7 +78,7 @@ GetExplicitTriggerCommandList(Oid relationId)
List *triggerIdList = GetExplicitTriggerIdList(relationId); List *triggerIdList = GetExplicitTriggerIdList(relationId);
Oid triggerId = InvalidOid; Oid triggerId = InvalidOid;
foreach_oid(triggerId, triggerIdList) foreach_declared_oid(triggerId, triggerIdList)
{ {
bool prettyOutput = false; bool prettyOutput = false;
Datum commandText = DirectFunctionCall2(pg_get_triggerdef_ext, Datum commandText = DirectFunctionCall2(pg_get_triggerdef_ext,
@ -404,40 +401,6 @@ CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt, char *schemaNam
} }
/*
* PreprocessAlterTriggerRenameStmt is called before a ALTER TRIGGER RENAME
* command has been executed by standard process utility. This function errors
* out if we are trying to rename a child trigger on a partition of a distributed
* table. In PG15, this is not allowed anyway.
*/
List *
PreprocessAlterTriggerRenameStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
#if (PG_VERSION_NUM < PG_VERSION_15)
RenameStmt *renameTriggerStmt = castNode(RenameStmt, node);
Assert(renameTriggerStmt->renameType == OBJECT_TRIGGER);
RangeVar *relation = renameTriggerStmt->relation;
bool missingOk = false;
Oid relationId = RangeVarGetRelid(relation, ALTER_TRIGGER_LOCK_MODE, missingOk);
if (!IsCitusTable(relationId))
{
return NIL;
}
EnsureCoordinator();
ErrorOutForTriggerIfNotSupported(relationId);
ErrorOutIfCloneTrigger(relationId, renameTriggerStmt->subname);
#endif
return NIL;
}
/* /*
* PostprocessAlterTriggerRenameStmt is called after an ALTER TRIGGER RENAME * PostprocessAlterTriggerRenameStmt is called after an ALTER TRIGGER RENAME
* command has been executed by standard process utility. This function errors * command has been executed by standard process utility. This function errors
@ -742,7 +705,7 @@ ErrorIfRelationHasUnsupportedTrigger(Oid relationId)
List *relationTriggerList = GetExplicitTriggerIdList(relationId); List *relationTriggerList = GetExplicitTriggerIdList(relationId);
Oid triggerId = InvalidOid; Oid triggerId = InvalidOid;
foreach_oid(triggerId, relationTriggerList) foreach_declared_oid(triggerId, relationTriggerList)
{ {
ObjectAddress triggerObjectAddress = InvalidObjectAddress; ObjectAddress triggerObjectAddress = InvalidObjectAddress;
ObjectAddressSet(triggerObjectAddress, TriggerRelationId, triggerId); ObjectAddressSet(triggerObjectAddress, TriggerRelationId, triggerId);
@ -759,64 +722,6 @@ ErrorIfRelationHasUnsupportedTrigger(Oid relationId)
} }
#if (PG_VERSION_NUM < PG_VERSION_15)
/*
* ErrorOutIfCloneTrigger is a helper function to error
* out if we are trying to rename a child trigger on a
* partition of a distributed table.
* A lot of this code is borrowed from PG15 because
* renaming clone triggers isn't allowed in PG15 anymore.
*/
static void
ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname)
{
HeapTuple tuple;
ScanKeyData key[2];
Relation tgrel = table_open(TriggerRelationId, RowExclusiveLock);
/*
* Search for the trigger to modify.
*/
ScanKeyInit(&key[0],
Anum_pg_trigger_tgrelid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(tgrelid));
ScanKeyInit(&key[1],
Anum_pg_trigger_tgname,
BTEqualStrategyNumber, F_NAMEEQ,
CStringGetDatum(tgname));
SysScanDesc tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
NULL, 2, key);
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
{
Form_pg_trigger trigform = (Form_pg_trigger) GETSTRUCT(tuple);
/*
* If the trigger descends from a trigger on a parent partitioned
* table, reject the rename.
* Appended shard ids to find the trigger on the partition's shards
* are not correct. Hence we would fail to find the trigger on the
* partition's shard.
*/
if (OidIsValid(trigform->tgparentid))
{
ereport(ERROR, (
errmsg(
"cannot rename child triggers on distributed partitions")));
}
}
systable_endscan(tgscan);
table_close(tgrel, RowExclusiveLock);
}
#endif
/* /*
* GetDropTriggerStmtRelation takes a DropStmt for a trigger object and returns * GetDropTriggerStmtRelation takes a DropStmt for a trigger object and returns
* RangeVar for the relation that owns the trigger. * RangeVar for the relation that owns the trigger.
View File
@ -135,7 +135,7 @@ TruncateTaskList(Oid relationId)
LockShardListMetadata(shardIntervalList, ShareLock); LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL; ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList) foreach_declared_ptr(shardInterval, shardIntervalList)
{ {
uint64 shardId = shardInterval->shardId; uint64 shardId = shardInterval->shardId;
char *shardRelationName = pstrdup(relationName); char *shardRelationName = pstrdup(relationName);
@ -264,7 +264,7 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement)
{ {
List *relationList = truncateStatement->relations; List *relationList = truncateStatement->relations;
RangeVar *rangeVar = NULL; RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, relationList) foreach_declared_ptr(rangeVar, relationList)
{ {
Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false); Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false);
@ -294,7 +294,7 @@ static void
EnsurePartitionTableNotReplicatedForTruncate(TruncateStmt *truncateStatement) EnsurePartitionTableNotReplicatedForTruncate(TruncateStmt *truncateStatement)
{ {
RangeVar *rangeVar = NULL; RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, truncateStatement->relations) foreach_declared_ptr(rangeVar, truncateStatement->relations)
{ {
Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false); Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false);
@ -322,7 +322,7 @@ ExecuteTruncateStmtSequentialIfNecessary(TruncateStmt *command)
bool failOK = false; bool failOK = false;
RangeVar *rangeVar = NULL; RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, relationList) foreach_declared_ptr(rangeVar, relationList)
{ {
Oid relationId = RangeVarGetRelid(rangeVar, NoLock, failOK); Oid relationId = RangeVarGetRelid(rangeVar, NoLock, failOK);
View File
@ -454,7 +454,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
bool analyze = false; bool analyze = false;
DefElem *option = NULL; DefElem *option = NULL;
foreach_ptr(option, explainStmt->options) foreach_declared_ptr(option, explainStmt->options)
{ {
if (strcmp(option->defname, "analyze") == 0) if (strcmp(option->defname, "analyze") == 0)
{ {
@ -695,7 +695,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
{ {
AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree; AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree;
AlterTableCmd *command = NULL; AlterTableCmd *command = NULL;
foreach_ptr(command, alterTableStmt->cmds) foreach_declared_ptr(command, alterTableStmt->cmds)
{ {
AlterTableType alterTableType = command->subtype; AlterTableType alterTableType = command->subtype;
@ -879,7 +879,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
} }
DDLJob *ddlJob = NULL; DDLJob *ddlJob = NULL;
foreach_ptr(ddlJob, ddlJobs) foreach_declared_ptr(ddlJob, ddlJobs)
{ {
ExecuteDistributedDDLJob(ddlJob); ExecuteDistributedDDLJob(ddlJob);
} }
@ -939,7 +939,7 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
{ {
List *addresses = GetObjectAddressListFromParseTree(parsetree, false, true); List *addresses = GetObjectAddressListFromParseTree(parsetree, false, true);
ObjectAddress *address = NULL; ObjectAddress *address = NULL;
foreach_ptr(address, addresses) foreach_declared_ptr(address, addresses)
{ {
MarkObjectDistributed(address); MarkObjectDistributed(address);
TrackPropagatedObject(address); TrackPropagatedObject(address);
@ -962,7 +962,7 @@ UndistributeDisconnectedCitusLocalTables(void)
citusLocalTableIdList = SortList(citusLocalTableIdList, CompareOids); citusLocalTableIdList = SortList(citusLocalTableIdList, CompareOids);
Oid citusLocalTableId = InvalidOid; Oid citusLocalTableId = InvalidOid;
foreach_oid(citusLocalTableId, citusLocalTableIdList) foreach_declared_oid(citusLocalTableId, citusLocalTableIdList)
{ {
/* acquire ShareRowExclusiveLock to prevent concurrent foreign key creation */ /* acquire ShareRowExclusiveLock to prevent concurrent foreign key creation */
LOCKMODE lockMode = ShareRowExclusiveLock; LOCKMODE lockMode = ShareRowExclusiveLock;
@ -1349,7 +1349,7 @@ CurrentSearchPath(void)
bool schemaAdded = false; bool schemaAdded = false;
Oid searchPathOid = InvalidOid; Oid searchPathOid = InvalidOid;
foreach_oid(searchPathOid, searchPathList) foreach_declared_oid(searchPathOid, searchPathList)
{ {
char *schemaName = get_namespace_name(searchPathOid); char *schemaName = get_namespace_name(searchPathOid);
@ -1483,7 +1483,7 @@ DDLTaskList(Oid relationId, const char *commandString)
LockShardListMetadata(shardIntervalList, ShareLock); LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL; ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList) foreach_declared_ptr(shardInterval, shardIntervalList)
{ {
uint64 shardId = shardInterval->shardId; uint64 shardId = shardInterval->shardId;
StringInfo applyCommand = makeStringInfo(); StringInfo applyCommand = makeStringInfo();
@ -1525,10 +1525,10 @@ NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands,
{ {
List *ddlJobs = NodeDDLTaskList(targets, commands); List *ddlJobs = NodeDDLTaskList(targets, commands);
DDLJob *ddlJob = NULL; DDLJob *ddlJob = NULL;
foreach_ptr(ddlJob, ddlJobs) foreach_declared_ptr(ddlJob, ddlJobs)
{ {
Task *task = NULL; Task *task = NULL;
foreach_ptr(task, ddlJob->taskList) foreach_declared_ptr(task, ddlJob->taskList)
{ {
task->cannotBeExecutedInTransaction = true; task->cannotBeExecutedInTransaction = true;
} }
@ -1564,7 +1564,7 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands)
SetTaskQueryStringList(task, commands); SetTaskQueryStringList(task, commands);
WorkerNode *workerNode = NULL; WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodes) foreach_declared_ptr(workerNode, workerNodes)
{ {
ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement); ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement);
targetPlacement->nodeName = workerNode->workerName; targetPlacement->nodeName = workerNode->workerName;
View File
@ -135,7 +135,7 @@ VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
List *relationIdList = NIL; List *relationIdList = NIL;
RangeVar *vacuumRelation = NULL; RangeVar *vacuumRelation = NULL;
foreach_ptr(vacuumRelation, vacuumRelationList) foreach_declared_ptr(vacuumRelation, vacuumRelationList)
{ {
/* /*
* If skip_locked option is enabled, we are skipping that relation * If skip_locked option is enabled, we are skipping that relation
@ -164,7 +164,7 @@ static bool
IsDistributedVacuumStmt(List *vacuumRelationIdList) IsDistributedVacuumStmt(List *vacuumRelationIdList)
{ {
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, vacuumRelationIdList) foreach_declared_oid(relationId, vacuumRelationIdList)
{ {
if (OidIsValid(relationId) && IsCitusTable(relationId)) if (OidIsValid(relationId) && IsCitusTable(relationId))
{ {
@ -187,7 +187,7 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
int relationIndex = 0; int relationIndex = 0;
Oid relationId = InvalidOid; Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList) foreach_declared_oid(relationId, relationIdList)
{ {
if (IsCitusTable(relationId)) if (IsCitusTable(relationId))
{ {
@ -252,7 +252,7 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum
LockShardListMetadata(shardIntervalList, ShareLock); LockShardListMetadata(shardIntervalList, ShareLock);
ShardInterval *shardInterval = NULL; ShardInterval *shardInterval = NULL;
foreach_ptr(shardInterval, shardIntervalList) foreach_declared_ptr(shardInterval, shardIntervalList)
{ {
uint64 shardId = shardInterval->shardId; uint64 shardId = shardInterval->shardId;
char *shardRelationName = pstrdup(relationName); char *shardRelationName = pstrdup(relationName);
@ -473,7 +473,7 @@ DeparseVacuumColumnNames(List *columnNameList)
appendStringInfoString(columnNames, " ("); appendStringInfoString(columnNames, " (");
String *columnName = NULL; String *columnName = NULL;
foreach_ptr(columnName, columnNameList) foreach_declared_ptr(columnName, columnNameList)
{ {
appendStringInfo(columnNames, "%s,", strVal(columnName)); appendStringInfo(columnNames, "%s,", strVal(columnName));
} }
@ -508,7 +508,7 @@ ExtractVacuumTargetRels(VacuumStmt *vacuumStmt)
List *vacuumList = NIL; List *vacuumList = NIL;
VacuumRelation *vacuumRelation = NULL; VacuumRelation *vacuumRelation = NULL;
foreach_ptr(vacuumRelation, vacuumStmt->rels) foreach_declared_ptr(vacuumRelation, vacuumStmt->rels)
{ {
vacuumList = lappend(vacuumList, vacuumRelation->relation); vacuumList = lappend(vacuumList, vacuumRelation->relation);
} }
@ -552,7 +552,7 @@ VacuumStmtParams(VacuumStmt *vacstmt)
/* Parse options list */ /* Parse options list */
DefElem *opt = NULL; DefElem *opt = NULL;
foreach_ptr(opt, vacstmt->options) foreach_declared_ptr(opt, vacstmt->options)
{ {
/* Parse common options for VACUUM and ANALYZE */ /* Parse common options for VACUUM and ANALYZE */
if (strcmp(opt->defname, "verbose") == 0) if (strcmp(opt->defname, "verbose") == 0)
@ -725,7 +725,7 @@ ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumPa
int32 localNodeGroupId = GetLocalGroupId(); int32 localNodeGroupId = GetLocalGroupId();
WorkerNode *workerNode = NULL; WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodes) foreach_declared_ptr(workerNode, workerNodes)
{ {
if (workerNode->groupId != localNodeGroupId) if (workerNode->groupId != localNodeGroupId)
{ {
View File
@ -69,7 +69,7 @@ ViewHasDistributedRelationDependency(ObjectAddress *viewObjectAddress)
List *dependencies = GetAllDependenciesForObject(viewObjectAddress); List *dependencies = GetAllDependenciesForObject(viewObjectAddress);
ObjectAddress *dependency = NULL; ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies) foreach_declared_ptr(dependency, dependencies)
{ {
if (dependency->classId == RelationRelationId && IsAnyObjectDistributed( if (dependency->classId == RelationRelationId && IsAnyObjectDistributed(
list_make1(dependency))) list_make1(dependency)))
@ -304,7 +304,7 @@ DropViewStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess)
List *objectAddresses = NIL; List *objectAddresses = NIL;
List *possiblyQualifiedViewName = NULL; List *possiblyQualifiedViewName = NULL;
foreach_ptr(possiblyQualifiedViewName, dropStmt->objects) foreach_declared_ptr(possiblyQualifiedViewName, dropStmt->objects)
{ {
RangeVar *viewRangeVar = makeRangeVarFromNameList(possiblyQualifiedViewName); RangeVar *viewRangeVar = makeRangeVarFromNameList(possiblyQualifiedViewName);
Oid viewOid = RangeVarGetRelid(viewRangeVar, AccessShareLock, Oid viewOid = RangeVarGetRelid(viewRangeVar, AccessShareLock,
@ -332,7 +332,7 @@ FilterNameListForDistributedViews(List *viewNamesList, bool missing_ok)
List *distributedViewNames = NIL; List *distributedViewNames = NIL;
List *possiblyQualifiedViewName = NULL; List *possiblyQualifiedViewName = NULL;
foreach_ptr(possiblyQualifiedViewName, viewNamesList) foreach_declared_ptr(possiblyQualifiedViewName, viewNamesList)
{ {
char *viewName = NULL; char *viewName = NULL;
char *schemaName = NULL; char *schemaName = NULL;
View File
@ -39,6 +39,7 @@
#include "distributed/remote_commands.h" #include "distributed/remote_commands.h"
#include "distributed/run_from_same_connection.h" #include "distributed/run_from_same_connection.h"
#include "distributed/shared_connection_stats.h" #include "distributed/shared_connection_stats.h"
#include "distributed/stats/stat_counters.h"
#include "distributed/time_constants.h" #include "distributed/time_constants.h"
#include "distributed/version_compat.h" #include "distributed/version_compat.h"
#include "distributed/worker_log_messages.h" #include "distributed/worker_log_messages.h"
@ -354,6 +355,18 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
MultiConnection *connection = FindAvailableConnection(entry->connections, flags); MultiConnection *connection = FindAvailableConnection(entry->connections, flags);
if (connection) if (connection)
{ {
/*
* Increment the connection stat counter for the connections that are
* reused only if the connection is in a good state. Here we don't
* bother shutting down the connection or such if it is not in a good
* state but we mostly want to avoid incrementing the connection stat
* counter for a connection that the caller cannot really use.
*/
if (PQstatus(connection->pgConn) == CONNECTION_OK)
{
IncrementStatCounterForMyDb(STAT_CONNECTION_REUSED);
}
return connection; return connection;
} }
} }
@ -395,6 +408,12 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
dlist_delete(&connection->connectionNode); dlist_delete(&connection->connectionNode);
pfree(connection); pfree(connection);
/*
* Here we don't increment the connection stat counter for the optional
* connections that we gave up establishing due to connection throttling
* because the callers who request optional connections know how to
* survive without them.
*/
return NULL; return NULL;
} }
} }
@ -866,7 +885,8 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount)
*waitCount = 0; *waitCount = 0;
} }
WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext, eventSetSize); WaitEventSet *waitEventSet = CreateWaitEventSet(WaitEventSetTracker_compat,
eventSetSize);
EnsureReleaseResource((MemoryContextCallbackFunction) (&FreeWaitEventSet), EnsureReleaseResource((MemoryContextCallbackFunction) (&FreeWaitEventSet),
waitEventSet); waitEventSet);
@ -879,7 +899,7 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount)
numEventsAdded += 2; numEventsAdded += 2;
MultiConnectionPollState *connectionState = NULL; MultiConnectionPollState *connectionState = NULL;
foreach_ptr(connectionState, connections) foreach_declared_ptr(connectionState, connections)
{ {
if (numEventsAdded >= eventSetSize) if (numEventsAdded >= eventSetSize)
{ {
@ -961,7 +981,7 @@ FinishConnectionListEstablishment(List *multiConnectionList)
int waitCount = 0; int waitCount = 0;
MultiConnection *connection = NULL; MultiConnection *connection = NULL;
foreach_ptr(connection, multiConnectionList) foreach_declared_ptr(connection, multiConnectionList)
{ {
MultiConnectionPollState *connectionState = MultiConnectionPollState *connectionState =
palloc0(sizeof(MultiConnectionPollState)); palloc0(sizeof(MultiConnectionPollState));
@ -981,6 +1001,14 @@ FinishConnectionListEstablishment(List *multiConnectionList)
{ {
waitCount++; waitCount++;
} }
else if (connectionState->phase == MULTI_CONNECTION_PHASE_ERROR)
{
/*
* Here we count the connections establishments that failed and that
* we won't wait anymore.
*/
IncrementStatCounterForMyDb(STAT_CONNECTION_ESTABLISHMENT_FAILED);
}
} }
/* prepare space for socket events */ /* prepare space for socket events */
@ -1025,6 +1053,11 @@ FinishConnectionListEstablishment(List *multiConnectionList)
if (event->events & WL_POSTMASTER_DEATH) if (event->events & WL_POSTMASTER_DEATH)
{ {
/*
* Here we don't increment the connection stat counter for the
* optional failed connections because this is not a connection
* failure, but a postmaster death in the local node.
*/
ereport(ERROR, (errmsg("postmaster was shut down, exiting"))); ereport(ERROR, (errmsg("postmaster was shut down, exiting")));
} }
@ -1041,6 +1074,12 @@ FinishConnectionListEstablishment(List *multiConnectionList)
* reset the memory context * reset the memory context
*/ */
MemoryContextDelete(MemoryContextSwitchTo(oldContext)); MemoryContextDelete(MemoryContextSwitchTo(oldContext));
/*
* Similarly, we don't increment the connection stat counter for the
* failed connections here because this is not a connection failure
* but a cancellation request is received.
*/
return; return;
} }
@ -1071,6 +1110,7 @@ FinishConnectionListEstablishment(List *multiConnectionList)
eventMask, NULL); eventMask, NULL);
if (!success) if (!success)
{ {
IncrementStatCounterForMyDb(STAT_CONNECTION_ESTABLISHMENT_FAILED);
ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("connection establishment for node %s:%d " errmsg("connection establishment for node %s:%d "
"failed", connection->hostname, "failed", connection->hostname,
@ -1087,7 +1127,15 @@ FinishConnectionListEstablishment(List *multiConnectionList)
*/ */
if (connectionState->phase == MULTI_CONNECTION_PHASE_CONNECTED) if (connectionState->phase == MULTI_CONNECTION_PHASE_CONNECTED)
{ {
MarkConnectionConnected(connectionState->connection); /*
* Since WaitEventSetFromMultiConnectionStates() only adds the
* connections that we haven't completed the connection
* establishment yet, here we always have a new connection.
* In other words, at this point, we surely know that we're
* not dealing with a cached connection.
*/
bool newConnection = true;
MarkConnectionConnected(connectionState->connection, newConnection);
} }
} }
} }
@ -1160,7 +1208,7 @@ static void
CloseNotReadyMultiConnectionStates(List *connectionStates) CloseNotReadyMultiConnectionStates(List *connectionStates)
{ {
MultiConnectionPollState *connectionState = NULL; MultiConnectionPollState *connectionState = NULL;
foreach_ptr(connectionState, connectionStates) foreach_declared_ptr(connectionState, connectionStates)
{ {
MultiConnection *connection = connectionState->connection; MultiConnection *connection = connectionState->connection;
@ -1171,6 +1219,8 @@ CloseNotReadyMultiConnectionStates(List *connectionStates)
/* close connection, otherwise we take up resource on the other side */ /* close connection, otherwise we take up resource on the other side */
CitusPQFinish(connection); CitusPQFinish(connection);
IncrementStatCounterForMyDb(STAT_CONNECTION_ESTABLISHMENT_FAILED);
} }
} }
@ -1583,7 +1633,7 @@ RemoteTransactionIdle(MultiConnection *connection)
* establishment time when necessary. * establishment time when necessary.
*/ */
void void
MarkConnectionConnected(MultiConnection *connection) MarkConnectionConnected(MultiConnection *connection, bool newConnection)
{ {
connection->connectionState = MULTI_CONNECTION_CONNECTED; connection->connectionState = MULTI_CONNECTION_CONNECTED;
@ -1591,6 +1641,11 @@ MarkConnectionConnected(MultiConnection *connection)
{ {
INSTR_TIME_SET_CURRENT(connection->connectionEstablishmentEnd); INSTR_TIME_SET_CURRENT(connection->connectionEstablishmentEnd);
} }
if (newConnection)
{
IncrementStatCounterForMyDb(STAT_CONNECTION_ESTABLISHMENT_SUCCEEDED);
}
} }
@ -360,7 +360,7 @@ EnsureConnectionPossibilityForNodeList(List *nodeList)
nodeList = SortList(nodeList, CompareWorkerNodes); nodeList = SortList(nodeList, CompareWorkerNodes);
WorkerNode *workerNode = NULL; WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, nodeList) foreach_declared_ptr(workerNode, nodeList)
{ {
bool waitForConnection = true; bool waitForConnection = true;
EnsureConnectionPossibilityForNode(workerNode, waitForConnection); EnsureConnectionPossibilityForNode(workerNode, waitForConnection);
@ -370,7 +370,7 @@ AssignPlacementListToConnection(List *placementAccessList, MultiConnection *conn
const char *userName = connection->user; const char *userName = connection->user;
ShardPlacementAccess *placementAccess = NULL; ShardPlacementAccess *placementAccess = NULL;
foreach_ptr(placementAccess, placementAccessList) foreach_declared_ptr(placementAccess, placementAccessList)
{ {
ShardPlacement *placement = placementAccess->placement; ShardPlacement *placement = placementAccess->placement;
ShardPlacementAccessType accessType = placementAccess->accessType; ShardPlacementAccessType accessType = placementAccess->accessType;
@ -533,7 +533,7 @@ FindPlacementListConnection(int flags, List *placementAccessList, const char *us
* suitable connection found for a placement in the placementAccessList. * suitable connection found for a placement in the placementAccessList.
*/ */
ShardPlacementAccess *placementAccess = NULL; ShardPlacementAccess *placementAccess = NULL;
foreach_ptr(placementAccess, placementAccessList) foreach_declared_ptr(placementAccess, placementAccessList)
{ {
ShardPlacement *placement = placementAccess->placement; ShardPlacement *placement = placementAccess->placement;
ShardPlacementAccessType accessType = placementAccess->accessType; ShardPlacementAccessType accessType = placementAccess->accessType;
@ -14,6 +14,7 @@
#include "miscadmin.h" #include "miscadmin.h"
#include "pgstat.h" #include "pgstat.h"
#include "catalog/pg_collation.h"
#include "lib/stringinfo.h" #include "lib/stringinfo.h"
#include "storage/latch.h" #include "storage/latch.h"
#include "utils/builtins.h" #include "utils/builtins.h"
@ -371,8 +372,9 @@ CommandMatchesLogGrepPattern(const char *command)
if (GrepRemoteCommands && strnlen(GrepRemoteCommands, NAMEDATALEN) > 0) if (GrepRemoteCommands && strnlen(GrepRemoteCommands, NAMEDATALEN) > 0)
{ {
Datum boolDatum = Datum boolDatum =
DirectFunctionCall2(textlike, CStringGetTextDatum(command), DirectFunctionCall2Coll(textlike, DEFAULT_COLLATION_OID,
CStringGetTextDatum(GrepRemoteCommands)); CStringGetTextDatum(command),
CStringGetTextDatum(GrepRemoteCommands));
return DatumGetBool(boolDatum); return DatumGetBool(boolDatum);
} }
@ -392,7 +394,7 @@ void
ExecuteCriticalRemoteCommandList(MultiConnection *connection, List *commandList) ExecuteCriticalRemoteCommandList(MultiConnection *connection, List *commandList)
{ {
const char *command = NULL; const char *command = NULL;
foreach_ptr(command, commandList) foreach_declared_ptr(command, commandList)
{ {
ExecuteCriticalRemoteCommand(connection, command); ExecuteCriticalRemoteCommand(connection, command);
} }
@ -435,7 +437,7 @@ ExecuteRemoteCommandInConnectionList(List *nodeConnectionList, const char *comma
{ {
MultiConnection *connection = NULL; MultiConnection *connection = NULL;
foreach_ptr(connection, nodeConnectionList) foreach_declared_ptr(connection, nodeConnectionList)
{ {
int querySent = SendRemoteCommand(connection, command); int querySent = SendRemoteCommand(connection, command);
@ -446,7 +448,7 @@ ExecuteRemoteCommandInConnectionList(List *nodeConnectionList, const char *comma
} }
/* Process the result */ /* Process the result */
foreach_ptr(connection, nodeConnectionList) foreach_declared_ptr(connection, nodeConnectionList)
{ {
bool raiseInterrupts = true; bool raiseInterrupts = true;
PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
@ -887,7 +889,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts)
/* convert connection list to an array such that we can move items around */ /* convert connection list to an array such that we can move items around */
MultiConnection *connectionItem = NULL; MultiConnection *connectionItem = NULL;
foreach_ptr(connectionItem, connectionList) foreach_declared_ptr(connectionItem, connectionList)
{ {
allConnections[connectionIndex] = connectionItem; allConnections[connectionIndex] = connectionItem;
connectionReady[connectionIndex] = false; connectionReady[connectionIndex] = false;
@ -1130,7 +1132,7 @@ BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount,
/* allocate pending connections + 2 for the signal latch and postmaster death */ /* allocate pending connections + 2 for the signal latch and postmaster death */
/* (CreateWaitEventSet makes room for pgwin32_signal_event automatically) */ /* (CreateWaitEventSet makes room for pgwin32_signal_event automatically) */
WaitEventSet *waitEventSet = CreateWaitEventSet(CurrentMemoryContext, WaitEventSet *waitEventSet = CreateWaitEventSet(WaitEventSetTracker_compat,
pendingConnectionCount + 2); pendingConnectionCount + 2);
for (int connectionIndex = 0; connectionIndex < pendingConnectionCount; for (int connectionIndex = 0; connectionIndex < pendingConnectionCount;
@ -614,16 +614,6 @@ WaitForSharedConnection(void)
void void
InitializeSharedConnectionStats(void) InitializeSharedConnectionStats(void)
{ {
/* on PG 15, we use shmem_request_hook_type */
#if PG_VERSION_NUM < PG_VERSION_15
/* allocate shared memory */
if (!IsUnderPostmaster)
{
RequestAddinShmemSpace(SharedConnectionStatsShmemSize());
}
#endif
prev_shmem_startup_hook = shmem_startup_hook; prev_shmem_startup_hook = shmem_startup_hook;
shmem_startup_hook = SharedConnectionStatsShmemInit; shmem_startup_hook = SharedConnectionStatsShmemInit;
} }
@ -11,6 +11,7 @@
#include "postgres.h" #include "postgres.h"
#include "utils/elog.h" #include "utils/elog.h"
#include "utils/memutils.h" /* for TopTransactionContext */
#include "distributed/connection_management.h" #include "distributed/connection_management.h"
#include "distributed/error_codes.h" #include "distributed/error_codes.h"
@ -82,7 +82,10 @@ static void AppendStorageParametersToString(StringInfo stringBuffer,
List *optionList); List *optionList);
static const char * convert_aclright_to_string(int aclright); static const char * convert_aclright_to_string(int aclright);
static void simple_quote_literal(StringInfo buf, const char *val); static void simple_quote_literal(StringInfo buf, const char *val);
static SubscriptingRef * TargetEntryExprFindSubsRef(Expr *expr);
static void AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer); static void AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer);
static void process_acl_items(Acl *acl, const char *relationName,
const char *attributeName, List **defs);
/* /*
@ -258,10 +261,8 @@ pg_get_sequencedef_string(Oid sequenceRelationId)
char *typeName = format_type_be(pgSequenceForm->seqtypid); char *typeName = format_type_be(pgSequenceForm->seqtypid);
char *sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, char *sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND,
#if (PG_VERSION_NUM >= PG_VERSION_15)
get_rel_persistence(sequenceRelationId) == get_rel_persistence(sequenceRelationId) ==
RELPERSISTENCE_UNLOGGED ? "UNLOGGED " : "", RELPERSISTENCE_UNLOGGED ? "UNLOGGED " : "",
#endif
qualifiedSequenceName, qualifiedSequenceName,
typeName, typeName,
pgSequenceForm->seqincrement, pgSequenceForm->seqmin, pgSequenceForm->seqincrement, pgSequenceForm->seqmin,
@ -315,6 +316,7 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
AttrNumber defaultValueIndex = 0; AttrNumber defaultValueIndex = 0;
AttrNumber constraintIndex = 0; AttrNumber constraintIndex = 0;
AttrNumber constraintCount = 0; AttrNumber constraintCount = 0;
bool relIsPartition = false;
StringInfoData buffer = { NULL, 0, 0, 0 }; StringInfoData buffer = { NULL, 0, 0, 0 };
/* /*
@ -342,6 +344,8 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
} }
appendStringInfo(&buffer, "TABLE %s (", relationName); appendStringInfo(&buffer, "TABLE %s (", relationName);
relIsPartition = relation->rd_rel->relispartition;
} }
else else
{ {
@ -392,10 +396,18 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
GetCompressionMethodName(attributeForm->attcompression)); GetCompressionMethodName(attributeForm->attcompression));
} }
if (attributeForm->attidentity && includeIdentityDefaults) /*
* If this is an identity column include its identity definition in the
* DDL only if its relation is not a partition. If it is a partition, any
* identity is inherited from the parent table by ATTACH PARTITION. This
* is Postgres 17+ behavior (commit 699586315); prior PG versions did not
* support identity columns in partitioned tables.
*/
if (attributeForm->attidentity && includeIdentityDefaults && !relIsPartition)
{ {
bool missing_ok = false; bool missing_ok = false;
Oid seqOid = getIdentitySequence(RelationGetRelid(relation), Oid seqOid = getIdentitySequence(identitySequenceRelation_compat(
relation),
attributeForm->attnum, missing_ok); attributeForm->attnum, missing_ok);
if (includeIdentityDefaults == INCLUDE_IDENTITY) if (includeIdentityDefaults == INCLUDE_IDENTITY)
@ -738,7 +750,18 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
* If the user changed the column's statistics target, create * If the user changed the column's statistics target, create
* alter statement and add statement to a list for later processing. * alter statement and add statement to a list for later processing.
*/ */
if (attributeForm->attstattarget >= 0) HeapTuple atttuple = SearchSysCache2(ATTNUM,
ObjectIdGetDatum(tableRelationId),
Int16GetDatum(attributeForm->attnum));
if (!HeapTupleIsValid(atttuple))
{
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
attributeForm->attnum, tableRelationId);
}
int32 targetAttstattarget = getAttstattarget_compat(atttuple);
ReleaseSysCache(atttuple);
if (targetAttstattarget >= 0)
{ {
StringInfoData statement = { NULL, 0, 0, 0 }; StringInfoData statement = { NULL, 0, 0, 0 };
initStringInfo(&statement); initStringInfo(&statement);
@ -746,7 +769,7 @@ pg_get_tablecolumnoptionsdef_string(Oid tableRelationId)
appendStringInfo(&statement, "ALTER COLUMN %s ", appendStringInfo(&statement, "ALTER COLUMN %s ",
quote_identifier(attributeName)); quote_identifier(attributeName));
appendStringInfo(&statement, "SET STATISTICS %d", appendStringInfo(&statement, "SET STATISTICS %d",
attributeForm->attstattarget); targetAttstattarget);
columnOptionList = lappend(columnOptionList, statement.data); columnOptionList = lappend(columnOptionList, statement.data);
} }
@ -835,12 +858,10 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
appendStringInfoString(buffer, ") "); appendStringInfoString(buffer, ") ");
} }
#if PG_VERSION_NUM >= PG_VERSION_15
if (indexStmt->nulls_not_distinct) if (indexStmt->nulls_not_distinct)
{ {
appendStringInfoString(buffer, "NULLS NOT DISTINCT "); appendStringInfoString(buffer, "NULLS NOT DISTINCT ");
} }
#endif /* PG_VERSION_15 */
if (indexStmt->options != NIL) if (indexStmt->options != NIL)
{ {
@ -938,7 +959,7 @@ bool
IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param) IsReindexWithParam_compat(ReindexStmt *reindexStmt, char *param)
{ {
DefElem *opt = NULL; DefElem *opt = NULL;
foreach_ptr(opt, reindexStmt->params) foreach_declared_ptr(opt, reindexStmt->params)
{ {
if (strcmp(opt->defname, param) == 0) if (strcmp(opt->defname, param) == 0)
{ {
@ -963,7 +984,7 @@ AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer)
char *tableSpaceName = NULL; char *tableSpaceName = NULL;
DefElem *opt = NULL; DefElem *opt = NULL;
foreach_ptr(opt, reindexStmt->params) foreach_declared_ptr(opt, reindexStmt->params)
{ {
if (strcmp(opt->defname, "tablespace") == 0) if (strcmp(opt->defname, "tablespace") == 0)
{ {
@ -1092,9 +1113,8 @@ pg_get_indexclusterdef_string(Oid indexRelationId)
/* /*
* pg_get_table_grants returns a list of sql statements which recreate the * pg_get_table_grants returns a list of sql statements which recreate the
* permissions for a specific table. * permissions for a specific table, including attributes privileges.
* *
* This function is modeled after aclexplode(), don't change too heavily.
*/ */
List * List *
pg_get_table_grants(Oid relationId) pg_get_table_grants(Oid relationId)
@ -1118,6 +1138,8 @@ pg_get_table_grants(Oid relationId)
errmsg("relation with OID %u does not exist", errmsg("relation with OID %u does not exist",
relationId))); relationId)));
} }
Form_pg_class classForm = (Form_pg_class) GETSTRUCT(classTuple);
AttrNumber nattrs = classForm->relnatts;
Datum aclDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_relacl, Datum aclDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_relacl,
&isNull); &isNull);
@ -1145,80 +1167,132 @@ pg_get_table_grants(Oid relationId)
/* iterate through the acl datastructure, emit GRANTs */ /* iterate through the acl datastructure, emit GRANTs */
Acl *acl = DatumGetAclP(aclDatum); Acl *acl = DatumGetAclP(aclDatum);
AclItem *aidat = ACL_DAT(acl);
int offtype = -1; process_acl_items(acl, relationName, NULL, &defs);
int i = 0;
while (i < ACL_NUM(acl)) /* if we have a detoasted copy, free it */
if ((Pointer) acl != DatumGetPointer(aclDatum))
pfree(acl);
}
resetStringInfo(&buffer);
/* lookup all attribute level grants */
for (AttrNumber attNum = 1; attNum <= nattrs; attNum++)
{
HeapTuple attTuple = SearchSysCache2(ATTNUM, ObjectIdGetDatum(relationId),
Int16GetDatum(attNum));
if (!HeapTupleIsValid(attTuple))
{ {
AclItem *aidata = NULL; ereport(ERROR,
AclMode priv_bit = 0; (errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("attribute with OID %u does not exist",
attNum)));
}
offtype++; Form_pg_attribute thisAttribute = (Form_pg_attribute) GETSTRUCT(attTuple);
if (offtype == N_ACL_RIGHTS) /* ignore dropped columns */
if (thisAttribute->attisdropped)
{
ReleaseSysCache(attTuple);
continue;
}
Datum aclAttDatum = SysCacheGetAttr(ATTNUM, attTuple, Anum_pg_attribute_attacl,
&isNull);
if (!isNull)
{
/* iterate through the acl datastructure, emit GRANTs */
Acl *acl = DatumGetAclP(aclAttDatum);
process_acl_items(acl, relationName, NameStr(thisAttribute->attname), &defs);
/* if we have a detoasted copy, free it */
if ((Pointer) acl != DatumGetPointer(aclAttDatum))
pfree(acl);
}
ReleaseSysCache(attTuple);
}
relation_close(relation, NoLock);
return defs;
}
/*
* Helper function to process ACL items.
* If attributeName is NULL, the function emits table-level GRANT commands;
* otherwise it emits column-level GRANT commands.
* This function was modeled after aclexplode(), previously in pg_get_table_grants().
*/
static void
process_acl_items(Acl *acl, const char *relationName, const char *attributeName,
List **defs)
{
AclItem *aidat = ACL_DAT(acl);
int i = 0;
int offtype = -1;
StringInfoData buffer;
initStringInfo(&buffer);
while (i < ACL_NUM(acl))
{
offtype++;
if (offtype == N_ACL_RIGHTS)
{
offtype = 0;
i++;
if (i >= ACL_NUM(acl)) /* done */
{ {
offtype = 0; break;
i++; }
if (i >= ACL_NUM(acl)) /* done */ }
{
break; AclItem *aidata = &aidat[i];
} AclMode priv_bit = 1 << offtype;
if (ACLITEM_GET_PRIVS(*aidata) & priv_bit)
{
const char *roleName = NULL;
const char *withGrant = "";
if (aidata->ai_grantee != 0)
{
roleName = quote_identifier(GetUserNameFromId(aidata->ai_grantee, false));
}
else
{
roleName = "PUBLIC";
} }
aidata = &aidat[i]; if ((ACLITEM_GET_GOPTIONS(*aidata) & priv_bit) != 0)
priv_bit = 1 << offtype;
if (ACLITEM_GET_PRIVS(*aidata) & priv_bit)
{ {
const char *roleName = NULL; withGrant = " WITH GRANT OPTION";
const char *withGrant = ""; }
if (aidata->ai_grantee != 0)
{
HeapTuple htup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(aidata->ai_grantee));
if (HeapTupleIsValid(htup))
{
Form_pg_authid authForm = ((Form_pg_authid) GETSTRUCT(htup));
roleName = quote_identifier(NameStr(authForm->rolname));
ReleaseSysCache(htup);
}
else
{
elog(ERROR, "cache lookup failed for role %u", aidata->ai_grantee);
}
}
else
{
roleName = "PUBLIC";
}
if ((ACLITEM_GET_GOPTIONS(*aidata) & priv_bit) != 0)
{
withGrant = " WITH GRANT OPTION";
}
if (attributeName)
{
appendStringInfo(&buffer, "GRANT %s(%s) ON %s TO %s%s",
convert_aclright_to_string(priv_bit),
quote_identifier(attributeName),
relationName,
roleName,
withGrant);
}
else
{
appendStringInfo(&buffer, "GRANT %s ON %s TO %s%s", appendStringInfo(&buffer, "GRANT %s ON %s TO %s%s",
convert_aclright_to_string(priv_bit), convert_aclright_to_string(priv_bit),
relationName, relationName,
roleName, roleName,
withGrant); withGrant);
defs = lappend(defs, pstrdup(buffer.data));
resetStringInfo(&buffer);
} }
*defs = lappend(*defs, pstrdup(buffer.data));
resetStringInfo(&buffer);
} }
} }
resetStringInfo(&buffer);
relation_close(relation, NoLock);
return defs;
/* *INDENT-ON* */
} }
@ -1347,6 +1421,10 @@ convert_aclright_to_string(int aclright)
return "TEMPORARY"; return "TEMPORARY";
case ACL_CONNECT: case ACL_CONNECT:
return "CONNECT"; return "CONNECT";
#if PG_VERSION_NUM >= PG_VERSION_17
case ACL_MAINTAIN:
return "MAINTAIN";
#endif
default: default:
elog(ERROR, "unrecognized aclright: %d", aclright); elog(ERROR, "unrecognized aclright: %d", aclright);
return NULL; return NULL;
@ -1638,3 +1716,255 @@ RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier)
} }
} }
} }
/*
* Recursively search an expression for a Param and return its paramid
* Intended for indirection management: UPDATE SET () = (SELECT )
* Does not cover all options but those supported by Citus.
*/
static int
GetParamId(Node *expr)
{
int paramid = 0;
if (expr == NULL)
{
return paramid;
}
/* If it's a Param, return its attnum */
if (IsA(expr, Param))
{
Param *param = (Param *) expr;
paramid = param->paramid;
}
/* If it's a FuncExpr, search in arguments */
else if (IsA(expr, FuncExpr))
{
FuncExpr *func = (FuncExpr *) expr;
ListCell *lc;
foreach(lc, func->args)
{
paramid = GetParamId((Node *) lfirst(lc));
if (paramid != 0)
{
break; /* Stop at the first valid paramid */
}
}
}
return paramid;
}
/*
* list_sort comparator to sort target list by paramid (in MULTIEXPR)
* Intended for indirection management: UPDATE SET () = (SELECT )
*/
static int
target_list_cmp(const ListCell *a, const ListCell *b)
{
TargetEntry *tleA = lfirst(a);
TargetEntry *tleB = lfirst(b);
/*
* Deal with resjunk entries; sublinks are marked resjunk and
* are placed at the end of the target list so this logic
* ensures they stay grouped at the end of the target list:
*/
if (tleA->resjunk || tleB->resjunk)
{
return tleA->resjunk - tleB->resjunk;
}
int la = GetParamId((Node *) tleA->expr);
int lb = GetParamId((Node *) tleB->expr);
/*
* Should be looking at legitimate param ids
*/
Assert(la > 0);
Assert(lb > 0);
/*
* Return -1, 0 or 1 depending on if la is less than,
* equal to or greater than lb
*/
return (la > lb) - (la < lb);
}
/*
* Used by get_update_query_targetlist_def() (in ruleutils) to reorder the target
* list on the left side of the update:
* SET () = (SELECT )
* Reordering the SELECT side only does not work, consider a case like:
* SET (col_1, col3) = (SELECT 1, 3), (col_2) = (SELECT 2)
* Without ensure_update_targetlist_in_param_order(), this will lead to an incorrect
* deparsed query:
* SET (col_1, col2) = (SELECT 1, 3), (col_3) = (SELECT 2)
*/
void
ensure_update_targetlist_in_param_order(List *targetList)
{
bool need_to_sort_target_list = false;
int previous_paramid = 0;
ListCell *l;
foreach(l, targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
if (!tle->resjunk)
{
int paramid = GetParamId((Node *) tle->expr);
if (paramid < previous_paramid)
{
need_to_sort_target_list = true;
break;
}
previous_paramid = paramid;
}
}
if (need_to_sort_target_list)
{
list_sort(targetList, target_list_cmp);
}
}
/*
* ExpandMergedSubscriptingRefEntries takes a list of target entries and expands
* each one that references a SubscriptingRef node that indicates multiple (field)
* updates on the same attribute, which is applicable for array/json types atm.
*/
List *
ExpandMergedSubscriptingRefEntries(List *targetEntryList)
{
List *newTargetEntryList = NIL;
ListCell *tgtCell = NULL;
foreach(tgtCell, targetEntryList)
{
TargetEntry *targetEntry = (TargetEntry *) lfirst(tgtCell);
List *expandedTargetEntries = NIL;
Expr *expr = targetEntry->expr;
while (expr)
{
SubscriptingRef *subsRef = TargetEntryExprFindSubsRef(expr);
if (!subsRef)
{
break;
}
/*
* Remove refexpr from the SubscriptingRef that we are about to
* wrap in a new TargetEntry and save it for the next one.
*/
Expr *refexpr = subsRef->refexpr;
subsRef->refexpr = NULL;
/*
* Wrap the Expr that holds SubscriptingRef (directly or indirectly)
* in a new TargetEntry; note that it doesn't have a refexpr anymore.
*/
TargetEntry *newTargetEntry = copyObject(targetEntry);
newTargetEntry->expr = expr;
expandedTargetEntries = lappend(expandedTargetEntries, newTargetEntry);
/* now inspect the refexpr that SubscriptingRef at hand were holding */
expr = refexpr;
}
if (expandedTargetEntries == NIL)
{
/* return original entry since it doesn't hold a SubscriptingRef node */
newTargetEntryList = lappend(newTargetEntryList, targetEntry);
}
else
{
/*
* Need to concat expanded target list entries in reverse order
* to preserve ordering of the original target entry list.
*/
List *reversedTgtEntries = NIL;
ListCell *revCell = NULL;
foreach(revCell, expandedTargetEntries)
{
TargetEntry *tgtEntry = (TargetEntry *) lfirst(revCell);
reversedTgtEntries = lcons(tgtEntry, reversedTgtEntries);
}
newTargetEntryList = list_concat(newTargetEntryList, reversedTgtEntries);
}
}
return newTargetEntryList;
}
/*
* TargetEntryExprFindSubsRef searches given Expr --assuming that it is part
* of a target list entry-- to see if it directly (i.e.: itself) or indirectly
* (e.g.: behind some level of coercions) holds a SubscriptingRef node.
*
* Returns the original SubscriptingRef node on success or NULL otherwise.
*
* Note that it wouldn't add much value to use expression_tree_walker here
* since we are only interested in a subset of the fields of a few certain
* node types.
*/
static SubscriptingRef *
TargetEntryExprFindSubsRef(Expr *expr)
{
Node *node = (Node *) expr;
while (node)
{
if (IsA(node, FieldStore))
{
/*
* ModifyPartialQuerySupported doesn't allow INSERT/UPDATE via
* FieldStore. If we decide supporting such commands, then we
* should take the first element of "newvals" list into account
* here. This is because, to support such commands, we will need
* to expand merged FieldStore into separate target entries too.
*
* For this reason, this block is not reachable atm and need to
* uncomment the following if we decide supporting such commands.
*
* """
* FieldStore *fieldStore = (FieldStore *) node;
* node = (Node *) linitial(fieldStore->newvals);
* """
*/
ereport(ERROR, (errmsg("unexpectedly got FieldStore object when "
"generating shard query")));
}
else if (IsA(node, CoerceToDomain))
{
CoerceToDomain *coerceToDomain = (CoerceToDomain *) node;
if (coerceToDomain->coercionformat != COERCE_IMPLICIT_CAST)
{
/* not an implicit coercion, cannot reach to a SubscriptingRef */
break;
}
node = (Node *) coerceToDomain->arg;
}
else if (IsA(node, SubscriptingRef))
{
return (SubscriptingRef *) node;
}
else
{
/* got a node that we are not interested in */
break;
}
}
return NULL;
}
@ -47,7 +47,7 @@ DeparseTreeNodes(List *stmts)
{ {
List *sqls = NIL; List *sqls = NIL;
Node *stmt = NULL; Node *stmt = NULL;
foreach_ptr(stmt, stmts) foreach_declared_ptr(stmt, stmts)
{ {
sqls = lappend(sqls, DeparseTreeNode(stmt)); sqls = lappend(sqls, DeparseTreeNode(stmt));
} }
@ -174,7 +174,7 @@ static void
AppendBasicAlterDatabaseOptions(StringInfo buf, AlterDatabaseStmt *stmt) AppendBasicAlterDatabaseOptions(StringInfo buf, AlterDatabaseStmt *stmt)
{ {
DefElem *def = NULL; DefElem *def = NULL;
foreach_ptr(def, stmt->options) foreach_declared_ptr(def, stmt->options)
{ {
DefElemOptionToStatement(buf, def, alterDatabaseOptionFormats, lengthof( DefElemOptionToStatement(buf, def, alterDatabaseOptionFormats, lengthof(
alterDatabaseOptionFormats)); alterDatabaseOptionFormats));
@ -211,7 +211,6 @@ DeparseAlterDatabaseStmt(Node *node)
} }
#if PG_VERSION_NUM >= PG_VERSION_15
char * char *
DeparseAlterDatabaseRefreshCollStmt(Node *node) DeparseAlterDatabaseRefreshCollStmt(Node *node)
{ {
@ -228,8 +227,6 @@ DeparseAlterDatabaseRefreshCollStmt(Node *node)
} }
#endif
static void static void
AppendAlterDatabaseSetStmt(StringInfo buf, AlterDatabaseSetStmt *stmt) AppendAlterDatabaseSetStmt(StringInfo buf, AlterDatabaseSetStmt *stmt)
{ {
@ -290,7 +287,7 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt)
quote_identifier(stmt->dbname)); quote_identifier(stmt->dbname));
DefElem *option = NULL; DefElem *option = NULL;
foreach_ptr(option, stmt->options) foreach_declared_ptr(option, stmt->options)
{ {
DefElemOptionToStatement(buf, option, createDatabaseOptionFormats, DefElemOptionToStatement(buf, option, createDatabaseOptionFormats,
lengthof(createDatabaseOptionFormats)); lengthof(createDatabaseOptionFormats));
@ -70,7 +70,7 @@ DeparseCreateDomainStmt(Node *node)
} }
Constraint *constraint = NULL; Constraint *constraint = NULL;
foreach_ptr(constraint, stmt->constraints) foreach_declared_ptr(constraint, stmt->constraints)
{ {
AppendConstraint(&buf, constraint, stmt->domainname, stmt->typeName); AppendConstraint(&buf, constraint, stmt->domainname, stmt->typeName);
} }
@ -117,7 +117,7 @@ DeparseDropDomainStmt(Node *node)
TypeName *domainName = NULL; TypeName *domainName = NULL;
bool first = true; bool first = true;
foreach_ptr(domainName, stmt->objects) foreach_declared_ptr(domainName, stmt->objects)
{ {
if (!first) if (!first)
{ {
@ -40,7 +40,7 @@ DefElem *
GetExtensionOption(List *extensionOptions, const char *defname) GetExtensionOption(List *extensionOptions, const char *defname)
{ {
DefElem *defElement = NULL; DefElem *defElement = NULL;
foreach_ptr(defElement, extensionOptions) foreach_declared_ptr(defElement, extensionOptions)
{ {
if (IsA(defElement, DefElem) && if (IsA(defElement, DefElem) &&
strncmp(defElement->defname, defname, NAMEDATALEN) == 0) strncmp(defElement->defname, defname, NAMEDATALEN) == 0)
@ -112,7 +112,7 @@ AppendCreateExtensionStmtOptions(StringInfo buf, List *options)
/* Add the options to the statement */ /* Add the options to the statement */
DefElem *defElem = NULL; DefElem *defElem = NULL;
foreach_ptr(defElem, options) foreach_declared_ptr(defElem, options)
{ {
if (strcmp(defElem->defname, "schema") == 0) if (strcmp(defElem->defname, "schema") == 0)
{ {
@ -181,7 +181,7 @@ AppendAlterExtensionStmt(StringInfo buf, AlterExtensionStmt *alterExtensionStmt)
* the options. * the options.
*/ */
DefElem *option = NULL; DefElem *option = NULL;
foreach_ptr(option, optionsList) foreach_declared_ptr(option, optionsList)
{ {
if (strcmp(option->defname, "new_version") == 0) if (strcmp(option->defname, "new_version") == 0)
{ {
@ -176,7 +176,7 @@ AppendAlterForeignServerOptions(StringInfo buf, AlterForeignServerStmt *stmt)
DefElemAction action = DEFELEM_UNSPEC; DefElemAction action = DEFELEM_UNSPEC;
DefElem *def = NULL; DefElem *def = NULL;
foreach_ptr(def, stmt->options) foreach_declared_ptr(def, stmt->options)
{ {
if (def->defaction != DEFELEM_UNSPEC) if (def->defaction != DEFELEM_UNSPEC)
{ {
@ -242,7 +242,7 @@ static void
AppendServerNames(StringInfo buf, DropStmt *stmt) AppendServerNames(StringInfo buf, DropStmt *stmt)
{ {
String *serverValue = NULL; String *serverValue = NULL;
foreach_ptr(serverValue, stmt->objects) foreach_declared_ptr(serverValue, stmt->objects)
{ {
const char *serverString = quote_identifier(strVal(serverValue)); const char *serverString = quote_identifier(strVal(serverValue));
appendStringInfo(buf, "%s", serverString); appendStringInfo(buf, "%s", serverString);
@ -32,7 +32,6 @@
static void AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt, static void AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
bool whereClauseNeedsTransform, bool whereClauseNeedsTransform,
bool includeLocalTables); bool includeLocalTables);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static bool AppendPublicationObjects(StringInfo buf, List *publicationObjects, static bool AppendPublicationObjects(StringInfo buf, List *publicationObjects,
bool whereClauseNeedsTransform, bool whereClauseNeedsTransform,
bool includeLocalTables); bool includeLocalTables);
@ -40,10 +39,6 @@ static void AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
Node *whereClause, Node *whereClause,
bool whereClauseNeedsTransform); bool whereClauseNeedsTransform);
static void AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action); static void AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action);
#else
static bool AppendTables(StringInfo buf, List *tables, bool includeLocalTables);
static void AppendDefElemAction(StringInfo buf, DefElemAction action);
#endif
static bool AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt, static bool AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
bool whereClauseNeedsTransform, bool whereClauseNeedsTransform,
bool includeLocalTables); bool includeLocalTables);
@ -108,7 +103,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
{ {
appendStringInfoString(buf, " FOR ALL TABLES"); appendStringInfoString(buf, " FOR ALL TABLES");
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
else if (stmt->pubobjects != NIL) else if (stmt->pubobjects != NIL)
{ {
bool hasObjects = false; bool hasObjects = false;
@ -118,7 +112,7 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
* Check whether there are objects to propagate, mainly to know whether * Check whether there are objects to propagate, mainly to know whether
* we should include "FOR". * we should include "FOR".
*/ */
foreach_ptr(publicationObject, stmt->pubobjects) foreach_declared_ptr(publicationObject, stmt->pubobjects)
{ {
if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE) if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE)
{ {
@ -146,32 +140,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
includeLocalTables); includeLocalTables);
} }
} }
#else
else if (stmt->tables != NIL)
{
bool hasTables = false;
RangeVar *rangeVar = NULL;
/*
* Check whether there are tables to propagate, mainly to know whether
* we should include "FOR".
*/
foreach_ptr(rangeVar, stmt->tables)
{
if (includeLocalTables || IsCitusTableRangeVar(rangeVar, NoLock, false))
{
hasTables = true;
break;
}
}
if (hasTables)
{
appendStringInfoString(buf, " FOR");
AppendTables(buf, stmt->tables, includeLocalTables);
}
}
#endif
if (stmt->options != NIL) if (stmt->options != NIL)
{ {
@ -182,8 +150,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* AppendPublicationObjects appends a string representing a list of publication * AppendPublicationObjects appends a string representing a list of publication
* objects to a buffer. * objects to a buffer.
@ -198,7 +164,7 @@ AppendPublicationObjects(StringInfo buf, List *publicationObjects,
PublicationObjSpec *publicationObject = NULL; PublicationObjSpec *publicationObject = NULL;
bool appendedObject = false; bool appendedObject = false;
foreach_ptr(publicationObject, publicationObjects) foreach_declared_ptr(publicationObject, publicationObjects)
{ {
if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE) if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE)
{ {
@ -320,57 +286,6 @@ AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
} }
#else
/*
* AppendPublicationObjects appends a string representing a list of publication
* objects to a buffer.
*
* For instance: TABLE users, departments
*/
static bool
AppendTables(StringInfo buf, List *tables, bool includeLocalTables)
{
RangeVar *rangeVar = NULL;
bool appendedObject = false;
foreach_ptr(rangeVar, tables)
{
if (!includeLocalTables &&
!IsCitusTableRangeVar(rangeVar, NoLock, false))
{
/* do not propagate local tables */
continue;
}
char *schemaName = rangeVar->schemaname;
char *tableName = rangeVar->relname;
if (schemaName != NULL)
{
/* qualified table name */
appendStringInfo(buf, "%s %s",
appendedObject ? "," : " TABLE",
quote_qualified_identifier(schemaName, tableName));
}
else
{
/* unqualified table name */
appendStringInfo(buf, "%s %s",
appendedObject ? "," : " TABLE",
quote_identifier(tableName));
}
appendedObject = true;
}
return appendedObject;
}
#endif
/* /*
* DeparseAlterPublicationSchemaStmt builds and returns a string representing * DeparseAlterPublicationSchemaStmt builds and returns a string representing
* an AlterPublicationStmt. * an AlterPublicationStmt.
@ -439,19 +354,12 @@ AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
return true; return true;
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
AppendAlterPublicationAction(buf, stmt->action); AppendAlterPublicationAction(buf, stmt->action);
return AppendPublicationObjects(buf, stmt->pubobjects, whereClauseNeedsTransform, return AppendPublicationObjects(buf, stmt->pubobjects, whereClauseNeedsTransform,
includeLocalTables); includeLocalTables);
#else
AppendDefElemAction(buf, stmt->tableAction);
return AppendTables(buf, stmt->tables, includeLocalTables);
#endif
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* AppendAlterPublicationAction appends a string representing an AlterPublicationAction * AppendAlterPublicationAction appends a string representing an AlterPublicationAction
* to a buffer. * to a buffer.
@ -487,46 +395,6 @@ AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action)
} }
#else
/*
* AppendDefElemAction appends a string representing a DefElemAction
* to a buffer.
*/
static void
AppendDefElemAction(StringInfo buf, DefElemAction action)
{
switch (action)
{
case DEFELEM_ADD:
{
appendStringInfoString(buf, " ADD");
break;
}
case DEFELEM_DROP:
{
appendStringInfoString(buf, " DROP");
break;
}
case DEFELEM_SET:
{
appendStringInfoString(buf, " SET");
break;
}
default:
{
ereport(ERROR, (errmsg("unrecognized publication action: %d", action)));
}
}
}
#endif
/* /*
* DeparseDropPublicationStmt builds and returns a string representing the DropStmt * DeparseDropPublicationStmt builds and returns a string representing the DropStmt
*/ */
@ -651,11 +519,7 @@ AppendPublicationOptions(StringInfo stringBuffer, List *optionList)
appendStringInfo(stringBuffer, "%s = ", appendStringInfo(stringBuffer, "%s = ",
quote_identifier(optionName)); quote_identifier(optionName));
#if (PG_VERSION_NUM >= PG_VERSION_15)
if (valueType == T_Integer || valueType == T_Float || valueType == T_Boolean) if (valueType == T_Integer || valueType == T_Float || valueType == T_Boolean)
#else
if (valueType == T_Integer || valueType == T_Float)
#endif
{ {
/* string escaping is unnecessary for numeric types and can cause issues */ /* string escaping is unnecessary for numeric types and can cause issues */
appendStringInfo(stringBuffer, "%s", optionValue); appendStringInfo(stringBuffer, "%s", optionValue);
@ -404,7 +404,7 @@ AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt)
if (!stmt->is_grant) if (!stmt->is_grant)
{ {
DefElem *opt = NULL; DefElem *opt = NULL;
foreach_ptr(opt, stmt->opt) foreach_declared_ptr(opt, stmt->opt)
{ {
if (strcmp(opt->defname, "admin") == 0) if (strcmp(opt->defname, "admin") == 0)
{ {
@ -440,7 +440,7 @@ AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt)
#if PG_VERSION_NUM >= PG_VERSION_16 #if PG_VERSION_NUM >= PG_VERSION_16
int opt_count = 0; int opt_count = 0;
DefElem *opt = NULL; DefElem *opt = NULL;
foreach_ptr(opt, stmt->opt) foreach_declared_ptr(opt, stmt->opt)
{ {
char *optval = defGetString(opt); char *optval = defGetString(opt);
bool option_value = false; bool option_value = false;
@ -152,7 +152,7 @@ AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt)
} }
String *schemaValue = NULL; String *schemaValue = NULL;
foreach_ptr(schemaValue, stmt->objects) foreach_declared_ptr(schemaValue, stmt->objects)
{ {
const char *schemaString = quote_identifier(strVal(schemaValue)); const char *schemaString = quote_identifier(strVal(schemaValue));
appendStringInfo(buf, "%s", schemaString); appendStringInfo(buf, "%s", schemaString);
@ -10,37 +10,16 @@
#include "postgres.h" #include "postgres.h"
#include "catalog/namespace.h"
#include "nodes/parsenodes.h" #include "nodes/parsenodes.h"
#include "utils/builtins.h" #include "utils/builtins.h"
#include "distributed/deparser.h" #include "distributed/deparser.h"
static void AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt);
/*
* DeparseSecLabelStmt builds and returns a string representing of the
* SecLabelStmt for application on a remote server.
*/
char *
DeparseSecLabelStmt(Node *node)
{
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
StringInfoData buf = { 0 };
initStringInfo(&buf);
AppendSecLabelStmt(&buf, secLabelStmt);
return buf.data;
}
/*
* AppendSecLabelStmt generates the string representation of the
* SecLabelStmt and appends it to the buffer.
*/
static void static void
AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt) BeginSecLabel(StringInfo buf, SecLabelStmt *stmt)
{ {
initStringInfo(buf);
appendStringInfoString(buf, "SECURITY LABEL "); appendStringInfoString(buf, "SECURITY LABEL ");
if (stmt->provider != NULL) if (stmt->provider != NULL)
@ -49,31 +28,84 @@ AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt)
} }
appendStringInfoString(buf, "ON "); appendStringInfoString(buf, "ON ");
}
switch (stmt->objtype)
{
case OBJECT_ROLE: static void
{ EndSecLabel(StringInfo buf, SecLabelStmt *stmt)
appendStringInfo(buf, "ROLE %s ", quote_identifier(strVal(stmt->object))); {
break; appendStringInfo(buf, "IS %s", (stmt->label != NULL) ?
} quote_literal_cstr(stmt->label) : "NULL");
}
/* normally, we shouldn't reach this */
default:
{ /*
ereport(ERROR, (errmsg("unsupported security label statement for" * DeparseRoleSecLabelStmt builds and returns a string representation of the
" deparsing"))); * SecLabelStmt for application on a remote server. The SecLabelStmt is for
} * a role object.
} */
char *
appendStringInfoString(buf, "IS "); DeparseRoleSecLabelStmt(Node *node)
{
if (stmt->label != NULL) SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
{ char *role_name = strVal(secLabelStmt->object);
appendStringInfo(buf, "%s", quote_literal_cstr(stmt->label)); StringInfoData buf = { 0 };
}
else BeginSecLabel(&buf, secLabelStmt);
{ appendStringInfo(&buf, "ROLE %s ", quote_identifier(role_name));
appendStringInfoString(buf, "NULL"); EndSecLabel(&buf, secLabelStmt);
}
return buf.data;
}
/*
* DeparseTableSecLabelStmt builds and returns a string representation of the
* SecLabelStmt for application on a remote server. The SecLabelStmt is for a
* table.
*/
char *
DeparseTableSecLabelStmt(Node *node)
{
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
List *names = (List *) secLabelStmt->object;
StringInfoData buf = { 0 };
BeginSecLabel(&buf, secLabelStmt);
appendStringInfo(&buf, "TABLE %s", quote_identifier(strVal(linitial(names))));
if (list_length(names) > 1)
{
appendStringInfo(&buf, ".%s", quote_identifier(strVal(lsecond(names))));
}
appendStringInfoString(&buf, " ");
EndSecLabel(&buf, secLabelStmt);
return buf.data;
}
/*
* DeparseColumnSecLabelStmt builds and returns a string representation of the
* SecLabelStmt for application on a remote server. The SecLabelStmt is for a
* column of a distributed table.
*/
char *
DeparseColumnSecLabelStmt(Node *node)
{
SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node);
List *names = (List *) secLabelStmt->object;
StringInfoData buf = { 0 };
BeginSecLabel(&buf, secLabelStmt);
appendStringInfo(&buf, "COLUMN %s.%s",
quote_identifier(strVal(linitial(names))),
quote_identifier(strVal(lsecond(names))));
if (list_length(names) > 2)
{
appendStringInfo(&buf, ".%s", quote_identifier(strVal(lthird(names))));
}
appendStringInfoString(&buf, " ");
EndSecLabel(&buf, secLabelStmt);
return buf.data;
} }
@ -28,9 +28,7 @@ static void AppendSequenceNameList(StringInfo buf, List *objects, ObjectType obj
static void AppendRenameSequenceStmt(StringInfo buf, RenameStmt *stmt); static void AppendRenameSequenceStmt(StringInfo buf, RenameStmt *stmt);
static void AppendAlterSequenceSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt); static void AppendAlterSequenceSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt);
static void AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt); static void AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt);
#if (PG_VERSION_NUM >= PG_VERSION_15)
static void AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt); static void AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt);
#endif
static void AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt); static void AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt); static void AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt);
@ -262,8 +260,6 @@ AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt)
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* DeparseAlterSequencePersistenceStmt builds and returns a string representing * DeparseAlterSequencePersistenceStmt builds and returns a string representing
* the AlterTableStmt consisting of changing the persistence of a sequence * the AlterTableStmt consisting of changing the persistence of a sequence
@ -349,9 +345,6 @@ AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt)
} }
#endif
/* /*
* DeparseGrantOnSequenceStmt builds and returns a string representing the GrantOnSequenceStmt * DeparseGrantOnSequenceStmt builds and returns a string representing the GrantOnSequenceStmt
*/ */
@ -177,8 +177,9 @@ AppendAlterStatisticsSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt)
static void static void
AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt) AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt)
{ {
appendStringInfo(buf, "ALTER STATISTICS %s SET STATISTICS %d", NameListToQuotedString( appendStringInfo(buf, "ALTER STATISTICS %s SET STATISTICS %d",
stmt->defnames), stmt->stxstattarget); NameListToQuotedString(stmt->defnames),
getIntStxstattarget_compat(stmt->stxstattarget));
} }
@ -216,7 +217,7 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt)
appendStringInfoString(buf, " ("); appendStringInfoString(buf, " (");
String *statType = NULL; String *statType = NULL;
foreach_ptr(statType, stmt->stat_types) foreach_declared_ptr(statType, stmt->stat_types)
{ {
appendStringInfoString(buf, strVal(statType)); appendStringInfoString(buf, strVal(statType));
@ -235,7 +236,7 @@ AppendColumnNames(StringInfo buf, CreateStatsStmt *stmt)
{ {
StatsElem *column = NULL; StatsElem *column = NULL;
foreach_ptr(column, stmt->exprs) foreach_declared_ptr(column, stmt->exprs)
{ {
if (!column->name) if (!column->name)
{ {
@ -193,12 +193,10 @@ AppendAlterTableCmdConstraint(StringInfo buf, Constraint *constraint,
{ {
appendStringInfoString(buf, " UNIQUE"); appendStringInfoString(buf, " UNIQUE");
#if (PG_VERSION_NUM >= PG_VERSION_15)
if (constraint->nulls_not_distinct == true) if (constraint->nulls_not_distinct == true)
{ {
appendStringInfoString(buf, " NULLS NOT DISTINCT"); appendStringInfoString(buf, " NULLS NOT DISTINCT");
} }
#endif
} }
if (subtype == AT_AddConstraint) if (subtype == AT_AddConstraint)
@ -86,7 +86,7 @@ AppendDefElemList(StringInfo buf, List *defelems, char *objectName)
{ {
DefElem *defelem = NULL; DefElem *defelem = NULL;
bool first = true; bool first = true;
foreach_ptr(defelem, defelems) foreach_declared_ptr(defelem, defelems)
{ {
if (!first) if (!first)
{ {
@ -133,7 +133,7 @@ DeparseDropTextSearchConfigurationStmt(Node *node)
appendStringInfoString(&buf, "DROP TEXT SEARCH CONFIGURATION "); appendStringInfoString(&buf, "DROP TEXT SEARCH CONFIGURATION ");
List *nameList = NIL; List *nameList = NIL;
bool first = true; bool first = true;
foreach_ptr(nameList, stmt->objects) foreach_declared_ptr(nameList, stmt->objects)
{ {
if (!first) if (!first)
{ {
@ -171,7 +171,7 @@ DeparseDropTextSearchDictionaryStmt(Node *node)
appendStringInfoString(&buf, "DROP TEXT SEARCH DICTIONARY "); appendStringInfoString(&buf, "DROP TEXT SEARCH DICTIONARY ");
List *nameList = NIL; List *nameList = NIL;
bool first = true; bool first = true;
foreach_ptr(nameList, stmt->objects) foreach_declared_ptr(nameList, stmt->objects)
{ {
if (!first) if (!first)
{ {
@ -404,7 +404,7 @@ AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes)
{ {
String *tokentype = NULL; String *tokentype = NULL;
bool first = true; bool first = true;
foreach_ptr(tokentype, tokentypes) foreach_declared_ptr(tokentype, tokentypes)
{ {
if (nodeTag(tokentype) != T_String) if (nodeTag(tokentype) != T_String)
{ {
@ -432,7 +432,7 @@ AppendStringInfoDictnames(StringInfo buf, List *dicts)
{ {
List *dictNames = NIL; List *dictNames = NIL;
bool first = true; bool first = true;
foreach_ptr(dictNames, dicts) foreach_declared_ptr(dictNames, dicts)
{ {
if (!first) if (!first)
{ {
@ -88,7 +88,7 @@ AppendViewNameList(StringInfo buf, List *viewNamesList)
{ {
bool isFirstView = true; bool isFirstView = true;
List *qualifiedViewName = NULL; List *qualifiedViewName = NULL;
foreach_ptr(qualifiedViewName, viewNamesList) foreach_declared_ptr(qualifiedViewName, viewNamesList)
{ {
char *quotedQualifiedVieName = NameListToQuotedString(qualifiedViewName); char *quotedQualifiedVieName = NameListToQuotedString(qualifiedViewName);
if (!isFirstView) if (!isFirstView)
@ -83,7 +83,7 @@ QualifyDropCollationStmt(Node *node)
List *names = NIL; List *names = NIL;
List *name = NIL; List *name = NIL;
foreach_ptr(name, stmt->objects) foreach_declared_ptr(name, stmt->objects)
{ {
names = lappend(names, QualifyCollationName(name)); names = lappend(names, QualifyCollationName(name));
} }
@ -67,7 +67,7 @@ QualifyDropDomainStmt(Node *node)
DropStmt *stmt = castNode(DropStmt, node); DropStmt *stmt = castNode(DropStmt, node);
TypeName *domainName = NULL; TypeName *domainName = NULL;
foreach_ptr(domainName, stmt->objects) foreach_declared_ptr(domainName, stmt->objects)
{ {
QualifyTypeName(domainName, stmt->missing_ok); QualifyTypeName(domainName, stmt->missing_ok);
} }
@ -249,7 +249,7 @@ QualifyCollate(CollateClause *collClause, bool missing_ok)
collClause->collname = NIL; collClause->collname = NIL;
char *name = NULL; char *name = NULL;
foreach_ptr(name, objName) foreach_declared_ptr(name, objName)
{ {
collClause->collname = lappend(collClause->collname, makeString(name)); collClause->collname = lappend(collClause->collname, makeString(name));
} }
@ -19,11 +19,7 @@
#include "distributed/deparser.h" #include "distributed/deparser.h"
#include "distributed/listutils.h" #include "distributed/listutils.h"
#if (PG_VERSION_NUM >= PG_VERSION_15)
static void QualifyPublicationObjects(List *publicationObjects); static void QualifyPublicationObjects(List *publicationObjects);
#else
static void QualifyTables(List *tables);
#endif
static void QualifyPublicationRangeVar(RangeVar *publication); static void QualifyPublicationRangeVar(RangeVar *publication);
@ -36,16 +32,10 @@ QualifyCreatePublicationStmt(Node *node)
{ {
CreatePublicationStmt *stmt = castNode(CreatePublicationStmt, node); CreatePublicationStmt *stmt = castNode(CreatePublicationStmt, node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
QualifyPublicationObjects(stmt->pubobjects); QualifyPublicationObjects(stmt->pubobjects);
#else
QualifyTables(stmt->tables);
#endif
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* QualifyPublicationObjects ensures all table names in a list of * QualifyPublicationObjects ensures all table names in a list of
* publication objects are fully qualified. * publication objects are fully qualified.
@ -55,7 +45,7 @@ QualifyPublicationObjects(List *publicationObjects)
{ {
PublicationObjSpec *publicationObject = NULL; PublicationObjSpec *publicationObject = NULL;
foreach_ptr(publicationObject, publicationObjects) foreach_declared_ptr(publicationObject, publicationObjects)
{ {
if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE) if (publicationObject->pubobjtype == PUBLICATIONOBJ_TABLE)
{ {
@ -68,26 +58,6 @@ QualifyPublicationObjects(List *publicationObjects)
} }
#else
/*
* QualifyTables ensures all table names in a list are fully qualified.
*/
static void
QualifyTables(List *tables)
{
RangeVar *rangeVar = NULL;
foreach_ptr(rangeVar, tables)
{
QualifyPublicationRangeVar(rangeVar);
}
}
#endif
/* /*
* QualifyPublicationObjects ensures all table names in a list of * QualifyPublicationObjects ensures all table names in a list of
* publication objects are fully qualified. * publication objects are fully qualified.
@ -97,11 +67,7 @@ QualifyAlterPublicationStmt(Node *node)
{ {
AlterPublicationStmt *stmt = castNode(AlterPublicationStmt, node); AlterPublicationStmt *stmt = castNode(AlterPublicationStmt, node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
QualifyPublicationObjects(stmt->pubobjects); QualifyPublicationObjects(stmt->pubobjects);
#else
QualifyTables(stmt->tables);
#endif
} }
@ -52,8 +52,6 @@ QualifyAlterSequenceOwnerStmt(Node *node)
} }
#if (PG_VERSION_NUM >= PG_VERSION_15)
/* /*
* QualifyAlterSequencePersistenceStmt transforms a * QualifyAlterSequencePersistenceStmt transforms a
* ALTER SEQUENCE .. SET LOGGED/UNLOGGED * ALTER SEQUENCE .. SET LOGGED/UNLOGGED
@ -80,9 +78,6 @@ QualifyAlterSequencePersistenceStmt(Node *node)
} }
#endif
/* /*
* QualifyAlterSequenceSchemaStmt transforms a * QualifyAlterSequenceSchemaStmt transforms a
* ALTER SEQUENCE .. SET SCHEMA .. * ALTER SEQUENCE .. SET SCHEMA ..
@ -148,7 +143,7 @@ QualifyDropSequenceStmt(Node *node)
List *objectNameListWithSchema = NIL; List *objectNameListWithSchema = NIL;
List *objectNameList = NULL; List *objectNameList = NULL;
foreach_ptr(objectNameList, stmt->objects) foreach_declared_ptr(objectNameList, stmt->objects)
{ {
RangeVar *seq = makeRangeVarFromNameList(objectNameList); RangeVar *seq = makeRangeVarFromNameList(objectNameList);
@ -192,7 +187,7 @@ QualifyGrantOnSequenceStmt(Node *node)
} }
List *qualifiedSequenceRangeVars = NIL; List *qualifiedSequenceRangeVars = NIL;
RangeVar *sequenceRangeVar = NULL; RangeVar *sequenceRangeVar = NULL;
foreach_ptr(sequenceRangeVar, stmt->objects) foreach_declared_ptr(sequenceRangeVar, stmt->objects)
{ {
if (sequenceRangeVar->schemaname == NULL) if (sequenceRangeVar->schemaname == NULL)
{ {


@ -73,7 +73,7 @@ QualifyDropStatisticsStmt(Node *node)
List *objectNameListWithSchema = NIL; List *objectNameListWithSchema = NIL;
List *objectNameList = NULL; List *objectNameList = NULL;
foreach_ptr(objectNameList, dropStatisticsStmt->objects) foreach_declared_ptr(objectNameList, dropStatisticsStmt->objects)
{ {
RangeVar *stat = makeRangeVarFromNameList(objectNameList); RangeVar *stat = makeRangeVarFromNameList(objectNameList);


@ -46,7 +46,7 @@ QualifyDropTextSearchConfigurationStmt(Node *node)
List *qualifiedObjects = NIL; List *qualifiedObjects = NIL;
List *objName = NIL; List *objName = NIL;
foreach_ptr(objName, stmt->objects) foreach_declared_ptr(objName, stmt->objects)
{ {
char *schemaName = NULL; char *schemaName = NULL;
char *tsconfigName = NULL; char *tsconfigName = NULL;
@ -87,7 +87,7 @@ QualifyDropTextSearchDictionaryStmt(Node *node)
List *qualifiedObjects = NIL; List *qualifiedObjects = NIL;
List *objName = NIL; List *objName = NIL;
foreach_ptr(objName, stmt->objects) foreach_declared_ptr(objName, stmt->objects)
{ {
char *schemaName = NULL; char *schemaName = NULL;
char *tsdictName = NULL; char *tsdictName = NULL;
@ -141,7 +141,7 @@ QualifyAlterTextSearchConfigurationStmt(Node *node)
bool useNewDicts = false; bool useNewDicts = false;
List *dicts = NULL; List *dicts = NULL;
List *dictName = NIL; List *dictName = NIL;
foreach_ptr(dictName, stmt->dicts) foreach_declared_ptr(dictName, stmt->dicts)
{ {
DeconstructQualifiedName(dictName, &schemaName, &objName); DeconstructQualifiedName(dictName, &schemaName, &objName);


@ -31,7 +31,7 @@ QualifyDropViewStmt(Node *node)
List *qualifiedViewNames = NIL; List *qualifiedViewNames = NIL;
List *possiblyQualifiedViewName = NULL; List *possiblyQualifiedViewName = NULL;
foreach_ptr(possiblyQualifiedViewName, stmt->objects) foreach_declared_ptr(possiblyQualifiedViewName, stmt->objects)
{ {
char *viewName = NULL; char *viewName = NULL;
char *schemaName = NULL; char *schemaName = NULL;


@ -67,7 +67,6 @@
#include "parser/parse_node.h" #include "parser/parse_node.h"
#include "parser/parse_agg.h" #include "parser/parse_agg.h"
#include "parser/parse_func.h" #include "parser/parse_func.h"
#include "parser/parse_node.h"
#include "parser/parse_oper.h" #include "parser/parse_oper.h"
#include "parser/parse_relation.h" #include "parser/parse_relation.h"
#include "parser/parser.h" #include "parser/parser.h"
@ -123,16 +122,18 @@ typedef struct
{ {
StringInfo buf; /* output buffer to append to */ StringInfo buf; /* output buffer to append to */
List *namespaces; /* List of deparse_namespace nodes */ List *namespaces; /* List of deparse_namespace nodes */
TupleDesc resultDesc; /* if top level of a view, the view's tupdesc */
List *targetList; /* Current query level's SELECT targetlist */
List *windowClause; /* Current query level's WINDOW clause */ List *windowClause; /* Current query level's WINDOW clause */
List *windowTList; /* targetlist for resolving WINDOW clause */
int prettyFlags; /* enabling of pretty-print functions */ int prettyFlags; /* enabling of pretty-print functions */
int wrapColumn; /* max line length, or -1 for no limit */ int wrapColumn; /* max line length, or -1 for no limit */
int indentLevel; /* current indent level for prettyprint */ int indentLevel; /* current indent level for prettyprint */
bool varprefix; /* true to print prefixes on Vars */ bool varprefix; /* true to print prefixes on Vars */
Oid distrelid; /* the distributed table being modified, if valid */ Oid distrelid; /* the distributed table being modified, if valid */
int64 shardid; /* a distributed table's shardid, if positive */ int64 shardid; /* a distributed table's shardid, if positive */
ParseExprKind special_exprkind; /* set only for exprkinds needing special bool colNamesVisible; /* do we care about output column names? */
* handling */ bool inGroupBy; /* deparsing GROUP BY clause? */
bool varInOrderBy; /* deparsing simple Var in ORDER BY? */
Bitmapset *appendparents; /* if not null, map child Vars of these relids Bitmapset *appendparents; /* if not null, map child Vars of these relids
* back to the parent rel */ * back to the parent rel */
} deparse_context; } deparse_context;
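Note: the struct change above moves per-query-level deparse state (the view tuple descriptor, the SELECT targetlist, column-name visibility, and the GROUP BY / ORDER BY flags) into deparse_context itself, so the helpers below no longer thread resultDesc and colNamesVisible through their parameter lists. A minimal sketch of the save/override/restore pattern those helpers use when they temporarily change a context flag for a nested deparse step (the function name is illustrative only):

    static void
    deparse_group_by_clause(deparse_context *context)
    {
        bool save_ingroupby = context->inGroupBy;

        context->inGroupBy = true;
        /* ... emit the grouping expressions here ... */
        context->inGroupBy = save_ingroupby;
    }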
@ -364,27 +365,19 @@ static void get_query_def_extended(Query *query, StringInfo buf,
int startIndent); int startIndent);
static void get_values_def(List *values_lists, deparse_context *context); static void get_values_def(List *values_lists, deparse_context *context);
static void get_with_clause(Query *query, deparse_context *context); static void get_with_clause(Query *query, deparse_context *context);
static void get_select_query_def(Query *query, deparse_context *context, static void get_select_query_def(Query *query, deparse_context *context);
TupleDesc resultDesc, bool colNamesVisible); static void get_insert_query_def(Query *query, deparse_context *context);
static void get_insert_query_def(Query *query, deparse_context *context, static void get_update_query_def(Query *query, deparse_context *context);
bool colNamesVisible);
static void get_update_query_def(Query *query, deparse_context *context,
bool colNamesVisible);
static void get_merge_query_def(Query *query, deparse_context *context);
static void get_update_query_targetlist_def(Query *query, List *targetList, static void get_update_query_targetlist_def(Query *query, List *targetList,
deparse_context *context, deparse_context *context,
RangeTblEntry *rte); RangeTblEntry *rte);
static void get_delete_query_def(Query *query, deparse_context *context, static void get_delete_query_def(Query *query, deparse_context *context);
bool colNamesVisible); static void get_merge_query_def(Query *query, deparse_context *context);
static void get_utility_query_def(Query *query, deparse_context *context); static void get_utility_query_def(Query *query, deparse_context *context);
static void get_basic_select_query(Query *query, deparse_context *context, static void get_basic_select_query(Query *query, deparse_context *context);
TupleDesc resultDesc, bool colNamesVisible); static void get_target_list(List *targetList, deparse_context *context);
static void get_target_list(List *targetList, deparse_context *context,
TupleDesc resultDesc, bool colNamesVisible);
static void get_setop_query(Node *setOp, Query *query, static void get_setop_query(Node *setOp, Query *query,
deparse_context *context, deparse_context *context);
TupleDesc resultDesc, bool colNamesVisible);
static Node *get_rule_sortgroupclause(Index ref, List *tlist, static Node *get_rule_sortgroupclause(Index ref, List *tlist,
bool force_colno, bool force_colno,
deparse_context *context); deparse_context *context);
@ -462,7 +455,7 @@ static char *generate_fragment_name(char *schemaName, char *tableName);
static char *generate_function_name(Oid funcid, int nargs, static char *generate_function_name(Oid funcid, int nargs,
List *argnames, Oid *argtypes, List *argnames, Oid *argtypes,
bool has_variadic, bool *use_variadic_p, bool has_variadic, bool *use_variadic_p,
ParseExprKind special_exprkind); bool inGroupBy);
static List *get_insert_column_names_list(List *targetList, StringInfo buf, deparse_context *context, RangeTblEntry *rte); static List *get_insert_column_names_list(List *targetList, StringInfo buf, deparse_context *context, RangeTblEntry *rte);
#define only_marker(rte) ((rte)->inh ? "" : "ONLY ") #define only_marker(rte) ((rte)->inh ? "" : "ONLY ")
@ -636,13 +629,16 @@ pg_get_rule_expr(Node *expression)
context.buf = buffer; context.buf = buffer;
context.namespaces = NIL; context.namespaces = NIL;
context.resultDesc = NULL;
context.targetList = NIL;
context.windowClause = NIL; context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = false; context.varprefix = false;
context.prettyFlags = 0; context.prettyFlags = 0;
context.wrapColumn = WRAP_COLUMN_DEFAULT; context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = 0; context.indentLevel = 0;
context.special_exprkind = EXPR_KIND_NONE; context.colNamesVisible = true;
context.inGroupBy = false;
context.varInOrderBy = false;
context.distrelid = InvalidOid; context.distrelid = InvalidOid;
context.shardid = INVALID_SHARD_ID; context.shardid = INVALID_SHARD_ID;
@ -2066,14 +2062,17 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
context.buf = buf; context.buf = buf;
context.namespaces = lcons(&dpns, list_copy(parentnamespace)); context.namespaces = lcons(&dpns, list_copy(parentnamespace));
context.resultDesc = NULL;
context.targetList = NIL;
context.windowClause = NIL; context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = (parentnamespace != NIL || context.varprefix = (parentnamespace != NIL ||
list_length(query->rtable) != 1); list_length(query->rtable) != 1);
context.prettyFlags = prettyFlags; context.prettyFlags = prettyFlags;
context.wrapColumn = wrapColumn; context.wrapColumn = wrapColumn;
context.indentLevel = startIndent; context.indentLevel = startIndent;
context.special_exprkind = EXPR_KIND_NONE; context.colNamesVisible = true;
context.inGroupBy = false;
context.varInOrderBy = false;
context.appendparents = NULL; context.appendparents = NULL;
context.distrelid = distrelid; context.distrelid = distrelid;
context.shardid = shardid; context.shardid = shardid;
@ -2083,19 +2082,21 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
switch (query->commandType) switch (query->commandType)
{ {
case CMD_SELECT: case CMD_SELECT:
get_select_query_def(query, &context, resultDesc, colNamesVisible); /* We set context.resultDesc only if it's a SELECT */
context.resultDesc = resultDesc;
get_select_query_def(query, &context);
break; break;
case CMD_UPDATE: case CMD_UPDATE:
get_update_query_def(query, &context, colNamesVisible); get_update_query_def(query, &context);
break; break;
case CMD_INSERT: case CMD_INSERT:
get_insert_query_def(query, &context, colNamesVisible); get_insert_query_def(query, &context);
break; break;
case CMD_DELETE: case CMD_DELETE:
get_delete_query_def(query, &context, colNamesVisible); get_delete_query_def(query, &context);
break; break;
case CMD_MERGE: case CMD_MERGE:
@ -2307,23 +2308,18 @@ get_with_clause(Query *query, deparse_context *context)
* ---------- * ----------
*/ */
static void static void
get_select_query_def(Query *query, deparse_context *context, get_select_query_def(Query *query, deparse_context *context)
TupleDesc resultDesc, bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
List *save_windowclause;
List *save_windowtlist;
bool force_colno; bool force_colno;
ListCell *l; ListCell *l;
/* Insert the WITH clause if given */ /* Insert the WITH clause if given */
get_with_clause(query, context); get_with_clause(query, context);
/* Set up context for possible window functions */ /* Subroutines may need to consult the SELECT targetlist and windowClause */
save_windowclause = context->windowClause; context->targetList = query->targetList;
context->windowClause = query->windowClause; context->windowClause = query->windowClause;
save_windowtlist = context->windowTList;
context->windowTList = query->targetList;
/* /*
* If the Query node has a setOperations tree, then it's the top level of * If the Query node has a setOperations tree, then it's the top level of
@ -2332,14 +2328,13 @@ get_select_query_def(Query *query, deparse_context *context,
*/ */
if (query->setOperations) if (query->setOperations)
{ {
get_setop_query(query->setOperations, query, context, resultDesc, get_setop_query(query->setOperations, query, context);
colNamesVisible);
/* ORDER BY clauses must be simple in this case */ /* ORDER BY clauses must be simple in this case */
force_colno = true; force_colno = true;
} }
else else
{ {
get_basic_select_query(query, context, resultDesc, colNamesVisible); get_basic_select_query(query, context);
force_colno = false; force_colno = false;
} }
@ -2429,9 +2424,6 @@ get_select_query_def(Query *query, deparse_context *context,
appendStringInfoString(buf, " SKIP LOCKED"); appendStringInfoString(buf, " SKIP LOCKED");
} }
} }
context->windowClause = save_windowclause;
context->windowTList = save_windowtlist;
} }
/* /*
@ -2506,8 +2498,7 @@ get_simple_values_rte(Query *query, TupleDesc resultDesc)
} }
static void static void
get_basic_select_query(Query *query, deparse_context *context, get_basic_select_query(Query *query, deparse_context *context)
TupleDesc resultDesc, bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
RangeTblEntry *values_rte; RangeTblEntry *values_rte;
@ -2525,7 +2516,7 @@ get_basic_select_query(Query *query, deparse_context *context,
* VALUES part. This reverses what transformValuesClause() did at parse * VALUES part. This reverses what transformValuesClause() did at parse
* time. * time.
*/ */
values_rte = get_simple_values_rte(query, resultDesc); values_rte = get_simple_values_rte(query, context->resultDesc);
if (values_rte) if (values_rte)
{ {
get_values_def(values_rte->values_lists, context); get_values_def(values_rte->values_lists, context);
@ -2563,7 +2554,7 @@ get_basic_select_query(Query *query, deparse_context *context,
} }
/* Then we tell what to select (the targetlist) */ /* Then we tell what to select (the targetlist) */
get_target_list(query->targetList, context, resultDesc, colNamesVisible); get_target_list(query->targetList, context);
/* Add the FROM clause if needed */ /* Add the FROM clause if needed */
get_from_clause(query, " FROM ", context); get_from_clause(query, " FROM ", context);
@ -2579,15 +2570,15 @@ get_basic_select_query(Query *query, deparse_context *context,
/* Add the GROUP BY clause if given */ /* Add the GROUP BY clause if given */
if (query->groupClause != NULL || query->groupingSets != NULL) if (query->groupClause != NULL || query->groupingSets != NULL)
{ {
ParseExprKind save_exprkind; bool save_ingroupby;
appendContextKeyword(context, " GROUP BY ", appendContextKeyword(context, " GROUP BY ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1); -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
if (query->groupDistinct) if (query->groupDistinct)
appendStringInfoString(buf, "DISTINCT "); appendStringInfoString(buf, "DISTINCT ");
save_exprkind = context->special_exprkind; save_ingroupby = context->inGroupBy;
context->special_exprkind = EXPR_KIND_GROUP_BY; context->inGroupBy = true;
if (query->groupingSets == NIL) if (query->groupingSets == NIL)
{ {
@ -2615,7 +2606,7 @@ get_basic_select_query(Query *query, deparse_context *context,
} }
} }
context->special_exprkind = save_exprkind; context->inGroupBy = save_ingroupby;
} }
/* Add the HAVING clause if given */ /* Add the HAVING clause if given */
@ -2634,14 +2625,11 @@ get_basic_select_query(Query *query, deparse_context *context,
/* ---------- /* ----------
* get_target_list - Parse back a SELECT target list * get_target_list - Parse back a SELECT target list
* *
* This is also used for RETURNING lists in INSERT/UPDATE/DELETE. * This is also used for RETURNING lists in INSERT/UPDATE/DELETE/MERGE.
*
* resultDesc and colNamesVisible are as for get_query_def()
* ---------- * ----------
*/ */
static void static void
get_target_list(List *targetList, deparse_context *context, get_target_list(List *targetList, deparse_context *context)
TupleDesc resultDesc, bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
StringInfoData targetbuf; StringInfoData targetbuf;
@ -2698,7 +2686,7 @@ get_target_list(List *targetList, deparse_context *context,
* assigned column name explicitly. Otherwise, show it only if * assigned column name explicitly. Otherwise, show it only if
* it's not FigureColname's fallback. * it's not FigureColname's fallback.
*/ */
attname = colNamesVisible ? NULL : "?column?"; attname = context->colNamesVisible ? NULL : "?column?";
} }
/* /*
@ -2707,8 +2695,9 @@ get_target_list(List *targetList, deparse_context *context,
* effects of any column RENAME that's been done on the view). * effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE. * Otherwise, just use what we can find in the TLE.
*/ */
if (resultDesc && colno <= resultDesc->natts) if (context->resultDesc && colno <= context->resultDesc->natts)
colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname); colname = NameStr(TupleDescAttr(context->resultDesc,
colno - 1)->attname);
else else
colname = tle->resname; colname = tle->resname;
@ -2776,8 +2765,7 @@ get_target_list(List *targetList, deparse_context *context,
} }
static void static void
get_setop_query(Node *setOp, Query *query, deparse_context *context, get_setop_query(Node *setOp, Query *query, deparse_context *context)
TupleDesc resultDesc, bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
bool need_paren; bool need_paren;
@ -2802,8 +2790,8 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
subquery->limitCount); subquery->limitCount);
if (need_paren) if (need_paren)
appendStringInfoChar(buf, '('); appendStringInfoChar(buf, '(');
get_query_def(subquery, buf, context->namespaces, resultDesc, get_query_def(subquery, buf, context->namespaces,
colNamesVisible, context->resultDesc, context->colNamesVisible,
context->prettyFlags, context->wrapColumn, context->prettyFlags, context->wrapColumn,
context->indentLevel); context->indentLevel);
if (need_paren) if (need_paren)
@ -2813,6 +2801,7 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
{ {
SetOperationStmt *op = (SetOperationStmt *) setOp; SetOperationStmt *op = (SetOperationStmt *) setOp;
int subindent; int subindent;
bool save_colnamesvisible;
/* /*
* We force parens when nesting two SetOperationStmts, except when the * We force parens when nesting two SetOperationStmts, except when the
@ -2846,7 +2835,7 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
else else
subindent = 0; subindent = 0;
get_setop_query(op->larg, query, context, resultDesc, colNamesVisible); get_setop_query(op->larg, query, context);
if (need_paren) if (need_paren)
appendContextKeyword(context, ") ", -subindent, 0, 0); appendContextKeyword(context, ") ", -subindent, 0, 0);
@ -2890,7 +2879,13 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
subindent = 0; subindent = 0;
appendContextKeyword(context, "", subindent, 0, 0); appendContextKeyword(context, "", subindent, 0, 0);
get_setop_query(op->rarg, query, context, resultDesc, false); /*
* The output column names of the RHS sub-select don't matter.
*/
save_colnamesvisible = context->colNamesVisible;
context->colNamesVisible = false;
get_setop_query(op->rarg, query, context);
context->colNamesVisible = save_colnamesvisible;
if (PRETTY_INDENT(context)) if (PRETTY_INDENT(context))
context->indentLevel -= subindent; context->indentLevel -= subindent;
@ -2924,20 +2919,31 @@ get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
* Use column-number form if requested by caller. Otherwise, if * Use column-number form if requested by caller. Otherwise, if
* expression is a constant, force it to be dumped with an explicit cast * expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is * as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we * ambiguous (and will be misinterpreted by findTargetlistEntrySQL92()) if
* dump it without any decoration. If it's anything more complex than a * we dump it without any decoration. Similarly, if it's just a Var,
* simple Var, then force extra parens around it, to ensure it can't be * there is risk of misinterpretation if the column name is reassigned in
* misinterpreted as a cube() or rollup() construct. * the SELECT list, so we may need to force table qualification. And, if
* it's anything more complex than a simple Var, then force extra parens
* around it, to ensure it can't be misinterpreted as a cube() or rollup()
* construct.
*/ */
if (force_colno) if (force_colno)
{ {
Assert(!tle->resjunk); Assert(!tle->resjunk);
appendStringInfo(buf, "%d", tle->resno); appendStringInfo(buf, "%d", tle->resno);
} }
else if (expr && IsA(expr, Const)) else if (!expr)
/* do nothing, probably can't happen */ ;
else if (IsA(expr, Const))
get_const_expr((Const *) expr, context, 1); get_const_expr((Const *) expr, context, 1);
else if (!expr || IsA(expr, Var)) else if (IsA(expr, Var))
get_rule_expr(expr, context, true); {
/* Tell get_variable to check for name conflict */
bool save_varinorderby = context->varInOrderBy;
context->varInOrderBy = true;
(void) get_variable((Var *) expr, 0, false, context);
context->varInOrderBy = save_varinorderby;
}
else else
{ {
/* /*
@ -3225,8 +3231,7 @@ get_rule_windowspec(WindowClause *wc, List *targetList,
* ---------- * ----------
*/ */
static void static void
get_insert_query_def(Query *query, deparse_context *context, get_insert_query_def(Query *query, deparse_context *context)
bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
RangeTblEntry *select_rte = NULL; RangeTblEntry *select_rte = NULL;
@ -3405,7 +3410,7 @@ get_insert_query_def(Query *query, deparse_context *context,
{ {
appendContextKeyword(context, " RETURNING", appendContextKeyword(context, " RETURNING",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1); -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_target_list(query->returningList, context, NULL, colNamesVisible); get_target_list(query->returningList, context);
} }
} }
@ -3414,8 +3419,7 @@ get_insert_query_def(Query *query, deparse_context *context,
* ---------- * ----------
*/ */
static void static void
get_update_query_def(Query *query, deparse_context *context, get_update_query_def(Query *query, deparse_context *context)
bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
RangeTblEntry *rte; RangeTblEntry *rte;
@ -3485,7 +3489,7 @@ get_update_query_def(Query *query, deparse_context *context,
{ {
appendContextKeyword(context, " RETURNING", appendContextKeyword(context, " RETURNING",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1); -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_target_list(query->returningList, context, NULL, colNamesVisible); get_target_list(query->returningList, context);
} }
} }
@ -3505,6 +3509,8 @@ get_update_query_targetlist_def(Query *query, List *targetList,
SubLink *cur_ma_sublink; SubLink *cur_ma_sublink;
List *ma_sublinks; List *ma_sublinks;
targetList = ExpandMergedSubscriptingRefEntries(targetList);
/* /*
* Prepare to deal with MULTIEXPR assignments: collect the source SubLinks * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks
* into a list. We expect them to appear, in ID order, in resjunk tlist * into a list. We expect them to appear, in ID order, in resjunk tlist
@ -3528,6 +3534,8 @@ get_update_query_targetlist_def(Query *query, List *targetList,
} }
} }
} }
ensure_update_targetlist_in_param_order(targetList);
} }
next_ma_cell = list_head(ma_sublinks); next_ma_cell = list_head(ma_sublinks);
cur_ma_sublink = NULL; cur_ma_sublink = NULL;
@ -3645,8 +3653,7 @@ get_update_query_targetlist_def(Query *query, List *targetList,
* ---------- * ----------
*/ */
static void static void
get_delete_query_def(Query *query, deparse_context *context, get_delete_query_def(Query *query, deparse_context *context)
bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
RangeTblEntry *rte; RangeTblEntry *rte;
@ -3711,7 +3718,7 @@ get_delete_query_def(Query *query, deparse_context *context,
{ {
appendContextKeyword(context, " RETURNING", appendContextKeyword(context, " RETURNING",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1); -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_target_list(query->returningList, context, NULL, colNamesVisible); get_target_list(query->returningList, context);
} }
} }
@ -3963,6 +3970,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
deparse_columns *colinfo; deparse_columns *colinfo;
char *refname; char *refname;
char *attname; char *attname;
bool need_prefix;
/* Find appropriate nesting depth */ /* Find appropriate nesting depth */
netlevelsup = var->varlevelsup + levelsup; netlevelsup = var->varlevelsup + levelsup;
@ -4163,7 +4171,42 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
attname = get_rte_attribute_name(rte, attnum); attname = get_rte_attribute_name(rte, attnum);
} }
if (refname && (context->varprefix || attname == NULL)) need_prefix = (context->varprefix || attname == NULL);
/*
* If we're considering a plain Var in an ORDER BY (but not GROUP BY)
* clause, we may need to add a table-name prefix to prevent
* findTargetlistEntrySQL92 from misinterpreting the name as an
* output-column name. To avoid cluttering the output with unnecessary
* prefixes, do so only if there is a name match to a SELECT tlist item
* that is different from the Var.
*/
if (context->varInOrderBy && !context->inGroupBy && !need_prefix)
{
int colno = 0;
ListCell *l;
foreach(l, context->targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
char *colname;
if (tle->resjunk)
continue; /* ignore junk entries */
colno++;
/* This must match colname-choosing logic in get_target_list() */
if (context->resultDesc && colno <= context->resultDesc->natts)
colname = NameStr(TupleDescAttr(context->resultDesc,
colno - 1)->attname);
else
colname = tle->resname;
if (colname && strcmp(colname, attname) == 0 &&
!equal(var, tle->expr))
{
need_prefix = true;
break;
}
}
}
if (refname && need_prefix)
{ {
appendStringInfoString(buf, quote_identifier(refname)); appendStringInfoString(buf, quote_identifier(refname));
appendStringInfoChar(buf, '.'); appendStringInfoChar(buf, '.');
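Note: the new block above decides whether a plain Var referenced in ORDER BY needs a table-name prefix: if some SELECT output column carries the same name but is a different expression (for example SELECT a + 1 AS a FROM t ORDER BY t.a), an unqualified reference would be bound to the output column under the SQL92 lookup rules, so the deparser must qualify it. A self-contained sketch of that conflict check, with plain C types standing in for the parse nodes (the real code compares nodes with equal()):

    #include <stdbool.h>
    #include <string.h>

    typedef struct
    {
        const char *colname;      /* output column name, as get_target_list() would print it */
        bool        is_same_var;  /* true when the tlist entry is exactly this Var */
    } OutputColumn;

    static bool
    NeedTablePrefix(const char *attname, const OutputColumn *cols, int ncols)
    {
        for (int i = 0; i < ncols; i++)
        {
            if (cols[i].colname != NULL &&
                strcmp(cols[i].colname, attname) == 0 &&
                !cols[i].is_same_var)
            {
                return true;      /* an alias shadows the column: qualify it */
            }
        }

        return false;
    }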
@ -6727,7 +6770,7 @@ get_func_expr(FuncExpr *expr, deparse_context *context,
argnames, argtypes, argnames, argtypes,
expr->funcvariadic, expr->funcvariadic,
&use_variadic, &use_variadic,
context->special_exprkind)); context->inGroupBy));
nargs = 0; nargs = 0;
foreach(l, expr->args) foreach(l, expr->args)
{ {
@ -6770,7 +6813,7 @@ get_proc_expr(CallStmt *stmt, deparse_context *context,
namedArgList, argumentTypes, namedArgList, argumentTypes,
stmt->funcexpr->funcvariadic, stmt->funcexpr->funcvariadic,
&use_variadic, &use_variadic,
context->special_exprkind)); context->inGroupBy));
int argNumber = 0; int argNumber = 0;
foreach(argumentCell, finalArgumentList) foreach(argumentCell, finalArgumentList)
{ {
@ -6832,7 +6875,7 @@ get_agg_expr(Aggref *aggref, deparse_context *context,
NIL, argtypes, NIL, argtypes,
aggref->aggvariadic, aggref->aggvariadic,
&use_variadic, &use_variadic,
context->special_exprkind), context->inGroupBy),
(aggref->aggdistinct != NIL) ? "DISTINCT " : ""); (aggref->aggdistinct != NIL) ? "DISTINCT " : "");
if (AGGKIND_IS_ORDERED_SET(aggref->aggkind)) if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
@ -6941,7 +6984,7 @@ get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context)
generate_function_name(wfunc->winfnoid, nargs, generate_function_name(wfunc->winfnoid, nargs,
argnames, argtypes, argnames, argtypes,
false, NULL, false, NULL,
context->special_exprkind)); context->inGroupBy));
/* winstar can be set only in zero-argument aggregates */ /* winstar can be set only in zero-argument aggregates */
if (wfunc->winstar) if (wfunc->winstar)
@ -6966,7 +7009,7 @@ get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context)
if (wc->name) if (wc->name)
appendStringInfoString(buf, quote_identifier(wc->name)); appendStringInfoString(buf, quote_identifier(wc->name));
else else
get_rule_windowspec(wc, context->windowTList, context); get_rule_windowspec(wc, context->targetList, context);
break; break;
} }
} }
@ -8271,7 +8314,7 @@ get_tablesample_def(TableSampleClause *tablesample, deparse_context *context)
appendStringInfo(buf, " TABLESAMPLE %s (", appendStringInfo(buf, " TABLESAMPLE %s (",
generate_function_name(tablesample->tsmhandler, 1, generate_function_name(tablesample->tsmhandler, 1,
NIL, argtypes, NIL, argtypes,
false, NULL, EXPR_KIND_NONE)); false, NULL, false));
nargs = 0; nargs = 0;
foreach(l, tablesample->args) foreach(l, tablesample->args)
@ -8618,12 +8661,14 @@ generate_fragment_name(char *schemaName, char *tableName)
* the output. For non-FuncExpr cases, has_variadic should be false and * the output. For non-FuncExpr cases, has_variadic should be false and
* use_variadic_p can be NULL. * use_variadic_p can be NULL.
* *
* inGroupBy must be true if we're deparsing a GROUP BY clause.
*
* The result includes all necessary quoting and schema-prefixing. * The result includes all necessary quoting and schema-prefixing.
*/ */
static char * static char *
generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes,
bool has_variadic, bool *use_variadic_p, bool has_variadic, bool *use_variadic_p,
ParseExprKind special_exprkind) bool inGroupBy)
{ {
char *result; char *result;
HeapTuple proctup; HeapTuple proctup;
@ -8648,9 +8693,9 @@ generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes,
/* /*
* Due to parser hacks to avoid needing to reserve CUBE, we need to force * Due to parser hacks to avoid needing to reserve CUBE, we need to force
* qualification in some special cases. * qualification of some function names within GROUP BY.
*/ */
if (special_exprkind == EXPR_KIND_GROUP_BY) if (inGroupBy)
{ {
if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0) if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0)
force_qualify = true; force_qualify = true;


@ -67,7 +67,6 @@
#include "parser/parse_node.h" #include "parser/parse_node.h"
#include "parser/parse_agg.h" #include "parser/parse_agg.h"
#include "parser/parse_func.h" #include "parser/parse_func.h"
#include "parser/parse_node.h"
#include "parser/parse_oper.h" #include "parser/parse_oper.h"
#include "parser/parse_relation.h" #include "parser/parse_relation.h"
#include "parser/parser.h" #include "parser/parser.h"
@ -123,16 +122,18 @@ typedef struct
{ {
StringInfo buf; /* output buffer to append to */ StringInfo buf; /* output buffer to append to */
List *namespaces; /* List of deparse_namespace nodes */ List *namespaces; /* List of deparse_namespace nodes */
TupleDesc resultDesc; /* if top level of a view, the view's tupdesc */
List *targetList; /* Current query level's SELECT targetlist */
List *windowClause; /* Current query level's WINDOW clause */ List *windowClause; /* Current query level's WINDOW clause */
List *windowTList; /* targetlist for resolving WINDOW clause */
int prettyFlags; /* enabling of pretty-print functions */ int prettyFlags; /* enabling of pretty-print functions */
int wrapColumn; /* max line length, or -1 for no limit */ int wrapColumn; /* max line length, or -1 for no limit */
int indentLevel; /* current indent level for prettyprint */ int indentLevel; /* current indent level for prettyprint */
bool varprefix; /* true to print prefixes on Vars */ bool varprefix; /* true to print prefixes on Vars */
Oid distrelid; /* the distributed table being modified, if valid */ Oid distrelid; /* the distributed table being modified, if valid */
int64 shardid; /* a distributed table's shardid, if positive */ int64 shardid; /* a distributed table's shardid, if positive */
ParseExprKind special_exprkind; /* set only for exprkinds needing special bool colNamesVisible; /* do we care about output column names? */
* handling */ bool inGroupBy; /* deparsing GROUP BY clause? */
bool varInOrderBy; /* deparsing simple Var in ORDER BY? */
Bitmapset *appendparents; /* if not null, map child Vars of these relids Bitmapset *appendparents; /* if not null, map child Vars of these relids
* back to the parent rel */ * back to the parent rel */
} deparse_context; } deparse_context;
@ -364,27 +365,19 @@ static void get_query_def_extended(Query *query, StringInfo buf,
int startIndent); int startIndent);
static void get_values_def(List *values_lists, deparse_context *context); static void get_values_def(List *values_lists, deparse_context *context);
static void get_with_clause(Query *query, deparse_context *context); static void get_with_clause(Query *query, deparse_context *context);
static void get_select_query_def(Query *query, deparse_context *context, static void get_select_query_def(Query *query, deparse_context *context);
TupleDesc resultDesc, bool colNamesVisible); static void get_insert_query_def(Query *query, deparse_context *context);
static void get_insert_query_def(Query *query, deparse_context *context, static void get_update_query_def(Query *query, deparse_context *context);
bool colNamesVisible);
static void get_update_query_def(Query *query, deparse_context *context,
bool colNamesVisible);
static void get_update_query_targetlist_def(Query *query, List *targetList, static void get_update_query_targetlist_def(Query *query, List *targetList,
deparse_context *context, deparse_context *context,
RangeTblEntry *rte); RangeTblEntry *rte);
static void get_delete_query_def(Query *query, deparse_context *context, static void get_delete_query_def(Query *query, deparse_context *context);
bool colNamesVisible); static void get_merge_query_def(Query *query, deparse_context *context);
static void get_merge_query_def(Query *query, deparse_context *context,
bool colNamesVisible);
static void get_utility_query_def(Query *query, deparse_context *context); static void get_utility_query_def(Query *query, deparse_context *context);
static void get_basic_select_query(Query *query, deparse_context *context, static void get_basic_select_query(Query *query, deparse_context *context);
TupleDesc resultDesc, bool colNamesVisible); static void get_target_list(List *targetList, deparse_context *context);
static void get_target_list(List *targetList, deparse_context *context,
TupleDesc resultDesc, bool colNamesVisible);
static void get_setop_query(Node *setOp, Query *query, static void get_setop_query(Node *setOp, Query *query,
deparse_context *context, deparse_context *context);
TupleDesc resultDesc, bool colNamesVisible);
static Node *get_rule_sortgroupclause(Index ref, List *tlist, static Node *get_rule_sortgroupclause(Index ref, List *tlist,
bool force_colno, bool force_colno,
deparse_context *context); deparse_context *context);
@ -479,7 +472,7 @@ static char *generate_fragment_name(char *schemaName, char *tableName);
static char *generate_function_name(Oid funcid, int nargs, static char *generate_function_name(Oid funcid, int nargs,
List *argnames, Oid *argtypes, List *argnames, Oid *argtypes,
bool has_variadic, bool *use_variadic_p, bool has_variadic, bool *use_variadic_p,
ParseExprKind special_exprkind); bool inGroupBy);
static List *get_insert_column_names_list(List *targetList, StringInfo buf, deparse_context *context, RangeTblEntry *rte); static List *get_insert_column_names_list(List *targetList, StringInfo buf, deparse_context *context, RangeTblEntry *rte);
#define only_marker(rte) ((rte)->inh ? "" : "ONLY ") #define only_marker(rte) ((rte)->inh ? "" : "ONLY ")
@ -653,13 +646,16 @@ pg_get_rule_expr(Node *expression)
context.buf = buffer; context.buf = buffer;
context.namespaces = NIL; context.namespaces = NIL;
context.resultDesc = NULL;
context.targetList = NIL;
context.windowClause = NIL; context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = false; context.varprefix = false;
context.prettyFlags = 0; context.prettyFlags = 0;
context.wrapColumn = WRAP_COLUMN_DEFAULT; context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = 0; context.indentLevel = 0;
context.special_exprkind = EXPR_KIND_NONE; context.colNamesVisible = true;
context.inGroupBy = false;
context.varInOrderBy = false;
context.distrelid = InvalidOid; context.distrelid = InvalidOid;
context.shardid = INVALID_SHARD_ID; context.shardid = INVALID_SHARD_ID;
@ -2080,14 +2076,17 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
context.buf = buf; context.buf = buf;
context.namespaces = lcons(&dpns, list_copy(parentnamespace)); context.namespaces = lcons(&dpns, list_copy(parentnamespace));
context.resultDesc = NULL;
context.targetList = NIL;
context.windowClause = NIL; context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = (parentnamespace != NIL || context.varprefix = (parentnamespace != NIL ||
list_length(query->rtable) != 1); list_length(query->rtable) != 1);
context.prettyFlags = prettyFlags; context.prettyFlags = prettyFlags;
context.wrapColumn = wrapColumn; context.wrapColumn = wrapColumn;
context.indentLevel = startIndent; context.indentLevel = startIndent;
context.special_exprkind = EXPR_KIND_NONE; context.colNamesVisible = true;
context.inGroupBy = false;
context.varInOrderBy = false;
context.appendparents = NULL; context.appendparents = NULL;
context.distrelid = distrelid; context.distrelid = distrelid;
context.shardid = shardid; context.shardid = shardid;
@ -2097,23 +2096,25 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
switch (query->commandType) switch (query->commandType)
{ {
case CMD_SELECT: case CMD_SELECT:
get_select_query_def(query, &context, resultDesc, colNamesVisible); /* We set context.resultDesc only if it's a SELECT */
context.resultDesc = resultDesc;
get_select_query_def(query, &context);
break; break;
case CMD_UPDATE: case CMD_UPDATE:
get_update_query_def(query, &context, colNamesVisible); get_update_query_def(query, &context);
break; break;
case CMD_INSERT: case CMD_INSERT:
get_insert_query_def(query, &context, colNamesVisible); get_insert_query_def(query, &context);
break; break;
case CMD_DELETE: case CMD_DELETE:
get_delete_query_def(query, &context, colNamesVisible); get_delete_query_def(query, &context);
break; break;
case CMD_MERGE: case CMD_MERGE:
get_merge_query_def(query, &context, colNamesVisible); get_merge_query_def(query, &context);
break; break;
case CMD_NOTHING: case CMD_NOTHING:
@ -2321,23 +2322,18 @@ get_with_clause(Query *query, deparse_context *context)
* ---------- * ----------
*/ */
static void static void
get_select_query_def(Query *query, deparse_context *context, get_select_query_def(Query *query, deparse_context *context)
TupleDesc resultDesc, bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
List *save_windowclause;
List *save_windowtlist;
bool force_colno; bool force_colno;
ListCell *l; ListCell *l;
/* Insert the WITH clause if given */ /* Insert the WITH clause if given */
get_with_clause(query, context); get_with_clause(query, context);
/* Set up context for possible window functions */ /* Subroutines may need to consult the SELECT targetlist and windowClause */
save_windowclause = context->windowClause; context->targetList = query->targetList;
context->windowClause = query->windowClause; context->windowClause = query->windowClause;
save_windowtlist = context->windowTList;
context->windowTList = query->targetList;
/* /*
* If the Query node has a setOperations tree, then it's the top level of * If the Query node has a setOperations tree, then it's the top level of
@ -2346,14 +2342,13 @@ get_select_query_def(Query *query, deparse_context *context,
*/ */
if (query->setOperations) if (query->setOperations)
{ {
get_setop_query(query->setOperations, query, context, resultDesc, get_setop_query(query->setOperations, query, context);
colNamesVisible);
/* ORDER BY clauses must be simple in this case */ /* ORDER BY clauses must be simple in this case */
force_colno = true; force_colno = true;
} }
else else
{ {
get_basic_select_query(query, context, resultDesc, colNamesVisible); get_basic_select_query(query, context);
force_colno = false; force_colno = false;
} }
@ -2443,9 +2438,6 @@ get_select_query_def(Query *query, deparse_context *context,
appendStringInfoString(buf, " SKIP LOCKED"); appendStringInfoString(buf, " SKIP LOCKED");
} }
} }
context->windowClause = save_windowclause;
context->windowTList = save_windowtlist;
} }
/* /*
@ -2520,8 +2512,7 @@ get_simple_values_rte(Query *query, TupleDesc resultDesc)
} }
static void static void
get_basic_select_query(Query *query, deparse_context *context, get_basic_select_query(Query *query, deparse_context *context)
TupleDesc resultDesc, bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
RangeTblEntry *values_rte; RangeTblEntry *values_rte;
@ -2539,7 +2530,7 @@ get_basic_select_query(Query *query, deparse_context *context,
* VALUES part. This reverses what transformValuesClause() did at parse * VALUES part. This reverses what transformValuesClause() did at parse
* time. * time.
*/ */
values_rte = get_simple_values_rte(query, resultDesc); values_rte = get_simple_values_rte(query, context->resultDesc);
if (values_rte) if (values_rte)
{ {
get_values_def(values_rte->values_lists, context); get_values_def(values_rte->values_lists, context);
@ -2577,7 +2568,7 @@ get_basic_select_query(Query *query, deparse_context *context,
} }
/* Then we tell what to select (the targetlist) */ /* Then we tell what to select (the targetlist) */
get_target_list(query->targetList, context, resultDesc, colNamesVisible); get_target_list(query->targetList, context);
/* Add the FROM clause if needed */ /* Add the FROM clause if needed */
get_from_clause(query, " FROM ", context); get_from_clause(query, " FROM ", context);
@ -2593,15 +2584,15 @@ get_basic_select_query(Query *query, deparse_context *context,
/* Add the GROUP BY clause if given */ /* Add the GROUP BY clause if given */
if (query->groupClause != NULL || query->groupingSets != NULL) if (query->groupClause != NULL || query->groupingSets != NULL)
{ {
ParseExprKind save_exprkind; bool save_ingroupby;
appendContextKeyword(context, " GROUP BY ", appendContextKeyword(context, " GROUP BY ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1); -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
if (query->groupDistinct) if (query->groupDistinct)
appendStringInfoString(buf, "DISTINCT "); appendStringInfoString(buf, "DISTINCT ");
save_exprkind = context->special_exprkind; save_ingroupby = context->inGroupBy;
context->special_exprkind = EXPR_KIND_GROUP_BY; context->inGroupBy = true;
if (query->groupingSets == NIL) if (query->groupingSets == NIL)
{ {
@ -2629,7 +2620,7 @@ get_basic_select_query(Query *query, deparse_context *context,
} }
} }
context->special_exprkind = save_exprkind; context->inGroupBy = save_ingroupby;
} }
/* Add the HAVING clause if given */ /* Add the HAVING clause if given */
@ -2648,14 +2639,11 @@ get_basic_select_query(Query *query, deparse_context *context,
/* ---------- /* ----------
* get_target_list - Parse back a SELECT target list * get_target_list - Parse back a SELECT target list
* *
* This is also used for RETURNING lists in INSERT/UPDATE/DELETE. * This is also used for RETURNING lists in INSERT/UPDATE/DELETE/MERGE.
*
* resultDesc and colNamesVisible are as for get_query_def()
* ---------- * ----------
*/ */
static void static void
get_target_list(List *targetList, deparse_context *context, get_target_list(List *targetList, deparse_context *context)
TupleDesc resultDesc, bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
StringInfoData targetbuf; StringInfoData targetbuf;
@ -2712,7 +2700,7 @@ get_target_list(List *targetList, deparse_context *context,
* assigned column name explicitly. Otherwise, show it only if * assigned column name explicitly. Otherwise, show it only if
* it's not FigureColname's fallback. * it's not FigureColname's fallback.
*/ */
attname = colNamesVisible ? NULL : "?column?"; attname = context->colNamesVisible ? NULL : "?column?";
} }
/* /*
@ -2721,8 +2709,9 @@ get_target_list(List *targetList, deparse_context *context,
* effects of any column RENAME that's been done on the view). * effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE. * Otherwise, just use what we can find in the TLE.
*/ */
if (resultDesc && colno <= resultDesc->natts) if (context->resultDesc && colno <= context->resultDesc->natts)
colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname); colname = NameStr(TupleDescAttr(context->resultDesc,
colno - 1)->attname);
else else
colname = tle->resname; colname = tle->resname;
@ -2790,8 +2779,7 @@ get_target_list(List *targetList, deparse_context *context,
} }
static void static void
get_setop_query(Node *setOp, Query *query, deparse_context *context, get_setop_query(Node *setOp, Query *query, deparse_context *context)
TupleDesc resultDesc, bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
bool need_paren; bool need_paren;
@ -2816,8 +2804,8 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
subquery->limitCount); subquery->limitCount);
if (need_paren) if (need_paren)
appendStringInfoChar(buf, '('); appendStringInfoChar(buf, '(');
get_query_def(subquery, buf, context->namespaces, resultDesc, get_query_def(subquery, buf, context->namespaces,
colNamesVisible, context->resultDesc, context->colNamesVisible,
context->prettyFlags, context->wrapColumn, context->prettyFlags, context->wrapColumn,
context->indentLevel); context->indentLevel);
if (need_paren) if (need_paren)
@ -2827,6 +2815,7 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
{ {
SetOperationStmt *op = (SetOperationStmt *) setOp; SetOperationStmt *op = (SetOperationStmt *) setOp;
int subindent; int subindent;
bool save_colnamesvisible;
/* /*
* We force parens when nesting two SetOperationStmts, except when the * We force parens when nesting two SetOperationStmts, except when the
@ -2860,7 +2849,7 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
else else
subindent = 0; subindent = 0;
get_setop_query(op->larg, query, context, resultDesc, colNamesVisible); get_setop_query(op->larg, query, context);
if (need_paren) if (need_paren)
appendContextKeyword(context, ") ", -subindent, 0, 0); appendContextKeyword(context, ") ", -subindent, 0, 0);
@ -2904,7 +2893,13 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
subindent = 0; subindent = 0;
appendContextKeyword(context, "", subindent, 0, 0); appendContextKeyword(context, "", subindent, 0, 0);
get_setop_query(op->rarg, query, context, resultDesc, false); /*
* The output column names of the RHS sub-select don't matter.
*/
save_colnamesvisible = context->colNamesVisible;
context->colNamesVisible = false;
get_setop_query(op->rarg, query, context);
context->colNamesVisible = save_colnamesvisible;
if (PRETTY_INDENT(context)) if (PRETTY_INDENT(context))
context->indentLevel -= subindent; context->indentLevel -= subindent;
@ -2938,20 +2933,31 @@ get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
* Use column-number form if requested by caller. Otherwise, if * Use column-number form if requested by caller. Otherwise, if
* expression is a constant, force it to be dumped with an explicit cast * expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is * as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we * ambiguous (and will be misinterpreted by findTargetlistEntrySQL92()) if
* dump it without any decoration. If it's anything more complex than a * we dump it without any decoration. Similarly, if it's just a Var,
* simple Var, then force extra parens around it, to ensure it can't be * there is risk of misinterpretation if the column name is reassigned in
* misinterpreted as a cube() or rollup() construct. * the SELECT list, so we may need to force table qualification. And, if
* it's anything more complex than a simple Var, then force extra parens
* around it, to ensure it can't be misinterpreted as a cube() or rollup()
* construct.
*/ */
if (force_colno) if (force_colno)
{ {
Assert(!tle->resjunk); Assert(!tle->resjunk);
appendStringInfo(buf, "%d", tle->resno); appendStringInfo(buf, "%d", tle->resno);
} }
else if (expr && IsA(expr, Const)) else if (!expr)
/* do nothing, probably can't happen */ ;
else if (IsA(expr, Const))
get_const_expr((Const *) expr, context, 1); get_const_expr((Const *) expr, context, 1);
else if (!expr || IsA(expr, Var)) else if (IsA(expr, Var))
get_rule_expr(expr, context, true); {
/* Tell get_variable to check for name conflict */
bool save_varinorderby = context->varInOrderBy;
context->varInOrderBy = true;
(void) get_variable((Var *) expr, 0, false, context);
context->varInOrderBy = save_varinorderby;
}
else else
{ {
/* /*
@ -3240,8 +3246,7 @@ get_rule_windowspec(WindowClause *wc, List *targetList,
* ---------- * ----------
*/ */
static void static void
get_insert_query_def(Query *query, deparse_context *context, get_insert_query_def(Query *query, deparse_context *context)
bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
RangeTblEntry *select_rte = NULL; RangeTblEntry *select_rte = NULL;
@ -3422,7 +3427,7 @@ get_insert_query_def(Query *query, deparse_context *context,
{ {
appendContextKeyword(context, " RETURNING", appendContextKeyword(context, " RETURNING",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1); -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_target_list(query->returningList, context, NULL, colNamesVisible); get_target_list(query->returningList, context);
} }
} }
@ -3431,8 +3436,7 @@ get_insert_query_def(Query *query, deparse_context *context,
* ---------- * ----------
*/ */
static void static void
get_update_query_def(Query *query, deparse_context *context, get_update_query_def(Query *query, deparse_context *context)
bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
RangeTblEntry *rte; RangeTblEntry *rte;
@ -3501,7 +3505,7 @@ get_update_query_def(Query *query, deparse_context *context,
{ {
appendContextKeyword(context, " RETURNING", appendContextKeyword(context, " RETURNING",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1); -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_target_list(query->returningList, context, NULL, colNamesVisible); get_target_list(query->returningList, context);
} }
} }
@ -3521,6 +3525,8 @@ get_update_query_targetlist_def(Query *query, List *targetList,
SubLink *cur_ma_sublink; SubLink *cur_ma_sublink;
List *ma_sublinks; List *ma_sublinks;
targetList = ExpandMergedSubscriptingRefEntries(targetList);
/* /*
* Prepare to deal with MULTIEXPR assignments: collect the source SubLinks * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks
* into a list. We expect them to appear, in ID order, in resjunk tlist * into a list. We expect them to appear, in ID order, in resjunk tlist
@ -3544,6 +3550,8 @@ get_update_query_targetlist_def(Query *query, List *targetList,
} }
} }
} }
ensure_update_targetlist_in_param_order(targetList);
} }
next_ma_cell = list_head(ma_sublinks); next_ma_cell = list_head(ma_sublinks);
cur_ma_sublink = NULL; cur_ma_sublink = NULL;
@ -3661,8 +3669,7 @@ get_update_query_targetlist_def(Query *query, List *targetList,
* ---------- * ----------
*/ */
static void static void
get_delete_query_def(Query *query, deparse_context *context, get_delete_query_def(Query *query, deparse_context *context)
bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
RangeTblEntry *rte; RangeTblEntry *rte;
@ -3726,7 +3733,7 @@ get_delete_query_def(Query *query, deparse_context *context,
{ {
appendContextKeyword(context, " RETURNING", appendContextKeyword(context, " RETURNING",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1); -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_target_list(query->returningList, context, NULL, colNamesVisible); get_target_list(query->returningList, context);
} }
} }
@ -3736,8 +3743,7 @@ get_delete_query_def(Query *query, deparse_context *context,
* ---------- * ----------
*/ */
static void static void
get_merge_query_def(Query *query, deparse_context *context, get_merge_query_def(Query *query, deparse_context *context)
bool colNamesVisible)
{ {
StringInfo buf = context->buf; StringInfo buf = context->buf;
RangeTblEntry *rte; RangeTblEntry *rte;
@ -3977,6 +3983,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
deparse_columns *colinfo; deparse_columns *colinfo;
char *refname; char *refname;
char *attname; char *attname;
bool need_prefix;
/* Find appropriate nesting depth */ /* Find appropriate nesting depth */
netlevelsup = var->varlevelsup + levelsup; netlevelsup = var->varlevelsup + levelsup;
@ -4177,7 +4184,42 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
attname = get_rte_attribute_name(rte, attnum); attname = get_rte_attribute_name(rte, attnum);
} }
if (refname && (context->varprefix || attname == NULL)) need_prefix = (context->varprefix || attname == NULL);
/*
* If we're considering a plain Var in an ORDER BY (but not GROUP BY)
* clause, we may need to add a table-name prefix to prevent
* findTargetlistEntrySQL92 from misinterpreting the name as an
* output-column name. To avoid cluttering the output with unnecessary
* prefixes, do so only if there is a name match to a SELECT tlist item
* that is different from the Var.
*/
if (context->varInOrderBy && !context->inGroupBy && !need_prefix)
{
int colno = 0;
ListCell *l;
foreach(l, context->targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
char *colname;
if (tle->resjunk)
continue; /* ignore junk entries */
colno++;
/* This must match colname-choosing logic in get_target_list() */
if (context->resultDesc && colno <= context->resultDesc->natts)
colname = NameStr(TupleDescAttr(context->resultDesc,
colno - 1)->attname);
else
colname = tle->resname;
if (colname && strcmp(colname, attname) == 0 &&
!equal(var, tle->expr))
{
need_prefix = true;
break;
}
}
}
if (refname && need_prefix)
{ {
appendStringInfoString(buf, quote_identifier(refname)); appendStringInfoString(buf, quote_identifier(refname));
appendStringInfoChar(buf, '.'); appendStringInfoChar(buf, '.');
@ -6775,7 +6817,7 @@ get_func_expr(FuncExpr *expr, deparse_context *context,
argnames, argtypes, argnames, argtypes,
expr->funcvariadic, expr->funcvariadic,
&use_variadic, &use_variadic,
context->special_exprkind)); context->inGroupBy));
nargs = 0; nargs = 0;
foreach(l, expr->args) foreach(l, expr->args)
{ {
@ -6818,7 +6860,7 @@ get_proc_expr(CallStmt *stmt, deparse_context *context,
namedArgList, argumentTypes, namedArgList, argumentTypes,
stmt->funcexpr->funcvariadic, stmt->funcexpr->funcvariadic,
&use_variadic, &use_variadic,
context->special_exprkind)); context->inGroupBy));
int argNumber = 0; int argNumber = 0;
foreach(argumentCell, finalArgumentList) foreach(argumentCell, finalArgumentList)
{ {
@ -6891,7 +6933,7 @@ get_agg_expr_helper(Aggref *aggref, deparse_context *context,
funcname = generate_function_name(aggref->aggfnoid, nargs, NIL, funcname = generate_function_name(aggref->aggfnoid, nargs, NIL,
argtypes, aggref->aggvariadic, argtypes, aggref->aggvariadic,
&use_variadic, &use_variadic,
context->special_exprkind); context->inGroupBy);
/* Print the aggregate name, schema-qualified if needed */ /* Print the aggregate name, schema-qualified if needed */
appendStringInfo(buf, "%s(%s", funcname, appendStringInfo(buf, "%s(%s", funcname,
@ -7032,7 +7074,7 @@ get_windowfunc_expr_helper(WindowFunc *wfunc, deparse_context *context,
if (!funcname) if (!funcname)
funcname = generate_function_name(wfunc->winfnoid, nargs, argnames, funcname = generate_function_name(wfunc->winfnoid, nargs, argnames,
argtypes, false, NULL, argtypes, false, NULL,
context->special_exprkind); context->inGroupBy);
appendStringInfo(buf, "%s(", funcname); appendStringInfo(buf, "%s(", funcname);
@ -7071,7 +7113,7 @@ get_windowfunc_expr_helper(WindowFunc *wfunc, deparse_context *context,
if (wc->name) if (wc->name)
appendStringInfoString(buf, quote_identifier(wc->name)); appendStringInfoString(buf, quote_identifier(wc->name));
else else
get_rule_windowspec(wc, context->windowTList, context); get_rule_windowspec(wc, context->targetList, context);
break; break;
} }
} }
@ -8547,7 +8589,7 @@ get_tablesample_def(TableSampleClause *tablesample, deparse_context *context)
appendStringInfo(buf, " TABLESAMPLE %s (", appendStringInfo(buf, " TABLESAMPLE %s (",
generate_function_name(tablesample->tsmhandler, 1, generate_function_name(tablesample->tsmhandler, 1,
NIL, argtypes, NIL, argtypes,
false, NULL, EXPR_KIND_NONE)); false, NULL, false));
nargs = 0; nargs = 0;
foreach(l, tablesample->args) foreach(l, tablesample->args)
@ -8894,12 +8936,14 @@ generate_fragment_name(char *schemaName, char *tableName)
* the output. For non-FuncExpr cases, has_variadic should be false and * the output. For non-FuncExpr cases, has_variadic should be false and
* use_variadic_p can be NULL. * use_variadic_p can be NULL.
* *
* inGroupBy must be true if we're deparsing a GROUP BY clause.
*
* The result includes all necessary quoting and schema-prefixing. * The result includes all necessary quoting and schema-prefixing.
*/ */
static char * static char *
generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes,
bool has_variadic, bool *use_variadic_p, bool has_variadic, bool *use_variadic_p,
ParseExprKind special_exprkind) bool inGroupBy)
{ {
char *result; char *result;
HeapTuple proctup; HeapTuple proctup;
@ -8924,9 +8968,9 @@ generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes,
/* /*
* Due to parser hacks to avoid needing to reserve CUBE, we need to force * Due to parser hacks to avoid needing to reserve CUBE, we need to force
* qualification in some special cases. * qualification of some function names within GROUP BY.
*/ */
if (special_exprkind == EXPR_KIND_GROUP_BY) if (inGroupBy)
{ {
if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0) if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0)
force_qualify = true; force_qualify = true;
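
Inside GROUP BY, an unqualified cube(...) or rollup(...) would be parsed as the CUBE/ROLLUP grouping construct rather than as a call to a function of that name, which is why the new boolean forces schema qualification there. A small sketch of the check, assuming the caller has already resolved the function name; ForceQualifyInGroupBy is a hypothetical helper, not a PostgreSQL or Citus symbol:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
ForceQualifyInGroupBy(const char *proname, bool inGroupBy)
{
	/*
	 * In GROUP BY, bare cube()/rollup() would be taken as grouping-set
	 * syntax, so the deparsed function name must be schema-qualified.
	 */
	return inGroupBy &&
		   (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0);
}

int
main(void)
{
	printf("%d %d %d\n",
		   ForceQualifyInGroupBy("cube", true),    /* 1: must qualify */
		   ForceQualifyInGroupBy("cube", false),   /* 0 */
		   ForceQualifyInGroupBy("sum", true));    /* 0 */
	return 0;
}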

 File diff suppressed because it is too large

View File

@ -171,6 +171,7 @@
#include "distributed/repartition_join_execution.h" #include "distributed/repartition_join_execution.h"
#include "distributed/resource_lock.h" #include "distributed/resource_lock.h"
#include "distributed/shared_connection_stats.h" #include "distributed/shared_connection_stats.h"
#include "distributed/stats/stat_counters.h"
#include "distributed/subplan_execution.h" #include "distributed/subplan_execution.h"
#include "distributed/transaction_identifier.h" #include "distributed/transaction_identifier.h"
#include "distributed/transaction_management.h" #include "distributed/transaction_management.h"
@ -690,7 +691,7 @@ static bool SendNextQuery(TaskPlacementExecution *placementExecution,
WorkerSession *session); WorkerSession *session);
static void ConnectionStateMachine(WorkerSession *session); static void ConnectionStateMachine(WorkerSession *session);
static bool HasUnfinishedTaskForSession(WorkerSession *session); static bool HasUnfinishedTaskForSession(WorkerSession *session);
static void HandleMultiConnectionSuccess(WorkerSession *session); static void HandleMultiConnectionSuccess(WorkerSession *session, bool newConnection);
static bool HasAnyConnectionFailure(WorkerPool *workerPool); static bool HasAnyConnectionFailure(WorkerPool *workerPool);
static void Activate2PCIfModifyingTransactionExpandsToNewNode(WorkerSession *session); static void Activate2PCIfModifyingTransactionExpandsToNewNode(WorkerSession *session);
static bool TransactionModifiedDistributedTable(DistributedExecution *execution); static bool TransactionModifiedDistributedTable(DistributedExecution *execution);
@ -718,10 +719,8 @@ static void RebuildWaitEventSetForSessions(DistributedExecution *execution);
static void AddLatchWaitEventToExecution(DistributedExecution *execution); static void AddLatchWaitEventToExecution(DistributedExecution *execution);
static void ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int static void ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int
eventCount, bool *cancellationReceived); eventCount, bool *cancellationReceived);
#if PG_VERSION_NUM >= PG_VERSION_15
static void RemoteSocketClosedForAnySession(DistributedExecution *execution); static void RemoteSocketClosedForAnySession(DistributedExecution *execution);
static void ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount); static void ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount);
#endif
static long MillisecondsBetweenTimestamps(instr_time startTime, instr_time endTime); static long MillisecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
static uint64 MicrosecondsBetweenTimestamps(instr_time startTime, instr_time endTime); static uint64 MicrosecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
static int WorkerPoolCompare(const void *lhsKey, const void *rhsKey); static int WorkerPoolCompare(const void *lhsKey, const void *rhsKey);
@ -761,7 +760,7 @@ AdaptiveExecutorPreExecutorRun(CitusScanState *scanState)
*/ */
LockPartitionsForDistributedPlan(distributedPlan); LockPartitionsForDistributedPlan(distributedPlan);
ExecuteSubPlans(distributedPlan); ExecuteSubPlans(distributedPlan, RequestedForExplainAnalyze(scanState));
scanState->finishedPreScan = true; scanState->finishedPreScan = true;
} }
@ -1430,7 +1429,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution)
List *taskList = execution->remoteTaskList; List *taskList = execution->remoteTaskList;
Task *task = NULL; Task *task = NULL;
foreach_ptr(task, taskList) foreach_declared_ptr(task, taskList)
{ {
bool placementExecutionReady = true; bool placementExecutionReady = true;
int placementExecutionIndex = 0; int placementExecutionIndex = 0;
@ -1453,7 +1452,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution)
SetAttributeInputMetadata(execution, shardCommandExecution); SetAttributeInputMetadata(execution, shardCommandExecution);
ShardPlacement *taskPlacement = NULL; ShardPlacement *taskPlacement = NULL;
foreach_ptr(taskPlacement, task->taskPlacementList) foreach_declared_ptr(taskPlacement, task->taskPlacementList)
{ {
int connectionFlags = 0; int connectionFlags = 0;
char *nodeName = NULL; char *nodeName = NULL;
@ -1598,7 +1597,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution)
* connection may be returned multiple times by GetPlacementListConnectionIfCached. * connection may be returned multiple times by GetPlacementListConnectionIfCached.
*/ */
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, execution->sessionList) foreach_declared_ptr(session, execution->sessionList)
{ {
MultiConnection *connection = session->connection; MultiConnection *connection = session->connection;
@ -1721,7 +1720,7 @@ static WorkerPool *
FindOrCreateWorkerPool(DistributedExecution *execution, char *nodeName, int nodePort) FindOrCreateWorkerPool(DistributedExecution *execution, char *nodeName, int nodePort)
{ {
WorkerPool *workerPool = NULL; WorkerPool *workerPool = NULL;
foreach_ptr(workerPool, execution->workerList) foreach_declared_ptr(workerPool, execution->workerList)
{ {
if (strncmp(nodeName, workerPool->nodeName, WORKER_LENGTH) == 0 && if (strncmp(nodeName, workerPool->nodeName, WORKER_LENGTH) == 0 &&
nodePort == workerPool->nodePort) nodePort == workerPool->nodePort)
@ -1768,7 +1767,7 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
static uint64 sessionId = 1; static uint64 sessionId = 1;
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList) foreach_declared_ptr(session, workerPool->sessionList)
{ {
if (session->connection == connection) if (session->connection == connection)
{ {
@ -1784,11 +1783,8 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
session->commandsSent = 0; session->commandsSent = 0;
session->waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED; session->waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED;
#if PG_VERSION_NUM >= PG_VERSION_15
/* always detect closed sockets */ /* always detect closed sockets */
UpdateConnectionWaitFlags(session, WL_SOCKET_CLOSED); UpdateConnectionWaitFlags(session, WL_SOCKET_CLOSED);
#endif
dlist_init(&session->pendingTaskQueue); dlist_init(&session->pendingTaskQueue);
dlist_init(&session->readyTaskQueue); dlist_init(&session->readyTaskQueue);
@ -1832,7 +1828,6 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
* the events, even ignores cancellation events. Future callers of this * the events, even ignores cancellation events. Future callers of this
* function should consider its limitations. * function should consider its limitations.
*/ */
#if PG_VERSION_NUM >= PG_VERSION_15
static void static void
RemoteSocketClosedForAnySession(DistributedExecution *execution) RemoteSocketClosedForAnySession(DistributedExecution *execution)
{ {
@ -1850,9 +1845,6 @@ RemoteSocketClosedForAnySession(DistributedExecution *execution)
} }
#endif
/* /*
* SequentialRunDistributedExecution gets a distributed execution and * SequentialRunDistributedExecution gets a distributed execution and
* executes each individual task in the execution sequentially, one * executes each individual task in the execution sequentially, one
@ -1871,7 +1863,7 @@ SequentialRunDistributedExecution(DistributedExecution *execution)
*/ */
MultiShardConnectionType = SEQUENTIAL_CONNECTION; MultiShardConnectionType = SEQUENTIAL_CONNECTION;
Task *taskToExecute = NULL; Task *taskToExecute = NULL;
foreach_ptr(taskToExecute, taskList) foreach_declared_ptr(taskToExecute, taskList)
{ {
execution->remoteAndLocalTaskList = list_make1(taskToExecute); execution->remoteAndLocalTaskList = list_make1(taskToExecute);
execution->remoteTaskList = list_make1(taskToExecute); execution->remoteTaskList = list_make1(taskToExecute);
@ -1911,7 +1903,7 @@ RunDistributedExecution(DistributedExecution *execution)
{ {
/* Preemptively step state machines in case of immediate errors */ /* Preemptively step state machines in case of immediate errors */
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, execution->sessionList) foreach_declared_ptr(session, execution->sessionList)
{ {
ConnectionStateMachine(session); ConnectionStateMachine(session);
} }
@ -1943,7 +1935,7 @@ RunDistributedExecution(DistributedExecution *execution)
HasIncompleteConnectionEstablishment(execution))) HasIncompleteConnectionEstablishment(execution)))
{ {
WorkerPool *workerPool = NULL; WorkerPool *workerPool = NULL;
foreach_ptr(workerPool, execution->workerList) foreach_declared_ptr(workerPool, execution->workerList)
{ {
ManageWorkerPool(workerPool); ManageWorkerPool(workerPool);
} }
@ -2028,7 +2020,7 @@ ProcessSessionsWithFailedWaitEventSetOperations(DistributedExecution *execution)
{ {
bool foundFailedSession = false; bool foundFailedSession = false;
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, execution->sessionList) foreach_declared_ptr(session, execution->sessionList)
{ {
if (session->waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED) if (session->waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED)
{ {
@ -2044,6 +2036,7 @@ ProcessSessionsWithFailedWaitEventSetOperations(DistributedExecution *execution)
else else
{ {
connection->connectionState = MULTI_CONNECTION_FAILED; connection->connectionState = MULTI_CONNECTION_FAILED;
IncrementStatCounterForMyDb(STAT_CONNECTION_ESTABLISHMENT_FAILED);
} }
@ -2072,7 +2065,7 @@ HasIncompleteConnectionEstablishment(DistributedExecution *execution)
} }
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, execution->sessionList) foreach_declared_ptr(session, execution->sessionList)
{ {
MultiConnection *connection = session->connection; MultiConnection *connection = session->connection;
if (connection->connectionState == MULTI_CONNECTION_INITIAL || if (connection->connectionState == MULTI_CONNECTION_INITIAL ||
@ -2188,8 +2181,6 @@ ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int eventC
} }
#if PG_VERSION_NUM >= PG_VERSION_15
/* /*
* ProcessWaitEventsForSocketClosed mainly checks for WL_SOCKET_CLOSED event. * ProcessWaitEventsForSocketClosed mainly checks for WL_SOCKET_CLOSED event.
* If WL_SOCKET_CLOSED is found, the function sets the underlying connection's * If WL_SOCKET_CLOSED is found, the function sets the underlying connection's
@ -2222,9 +2213,6 @@ ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount)
} }
#endif
/* /*
* ManageWorkerPool ensures the worker pool has the appropriate number of connections * ManageWorkerPool ensures the worker pool has the appropriate number of connections
* based on the number of pending tasks. * based on the number of pending tasks.
@ -2550,7 +2538,7 @@ AvgTaskExecutionTimeApproximation(WorkerPool *workerPool)
INSTR_TIME_SET_CURRENT(now); INSTR_TIME_SET_CURRENT(now);
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList) foreach_declared_ptr(session, workerPool->sessionList)
{ {
/* /*
* Involve the tasks that are currently running. We do this to * Involve the tasks that are currently running. We do this to
@ -2588,7 +2576,7 @@ AvgConnectionEstablishmentTime(WorkerPool *workerPool)
int sessionCount = 0; int sessionCount = 0;
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList) foreach_declared_ptr(session, workerPool->sessionList)
{ {
MultiConnection *connection = session->connection; MultiConnection *connection = session->connection;
@ -2719,7 +2707,6 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
* Instead, we prefer this slight difference, which in practice makes almost no * Instead, we prefer this slight difference, which in practice makes almost no
* difference; it merely does things at different points in time. * difference; it merely does things at different points in time.
*/ */
#if PG_VERSION_NUM >= PG_VERSION_15
/* we added new connections, rebuild the waitEventSet */ /* we added new connections, rebuild the waitEventSet */
RebuildWaitEventSetForSessions(execution); RebuildWaitEventSetForSessions(execution);
@ -2739,12 +2726,9 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
* of the execution. * of the execution.
*/ */
AddLatchWaitEventToExecution(execution); AddLatchWaitEventToExecution(execution);
#else
execution->rebuildWaitEventSet = true;
#endif
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, newSessionsList) foreach_declared_ptr(session, newSessionsList)
{ {
/* immediately run the state machine to handle potential failure */ /* immediately run the state machine to handle potential failure */
ConnectionStateMachine(session); ConnectionStateMachine(session);
@ -2828,21 +2812,21 @@ CheckConnectionTimeout(WorkerPool *workerPool)
logLevel = ERROR; logLevel = ERROR;
} }
ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("could not establish any connections to the node "
"%s:%d after %u ms", workerPool->nodeName,
workerPool->nodePort,
NodeConnectionTimeout)));
/* /*
* We hit the connection timeout. In that case, we should not let the * We hit the connection timeout. In that case, we should not let the
* connection establishment continue because the execution logic * connection establishment continue because the execution logic
* pretends that failed sessions are not going to be used anymore. * pretends that failed sessions are not going to be used anymore.
* *
* That's why we mark the connection as timed out to trigger the state * That's why we mark the connection as timed out to trigger the state
* changes in the executor. * changes in the executor, if we don't throw an error below.
*/ */
MarkEstablishingSessionsTimedOut(workerPool); MarkEstablishingSessionsTimedOut(workerPool);
ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("could not establish any connections to the node "
"%s:%d after %u ms", workerPool->nodeName,
workerPool->nodePort,
NodeConnectionTimeout)));
} }
else else
{ {
@ -2862,7 +2846,7 @@ static void
MarkEstablishingSessionsTimedOut(WorkerPool *workerPool) MarkEstablishingSessionsTimedOut(WorkerPool *workerPool)
{ {
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList) foreach_declared_ptr(session, workerPool->sessionList)
{ {
MultiConnection *connection = session->connection; MultiConnection *connection = session->connection;
@ -2870,6 +2854,7 @@ MarkEstablishingSessionsTimedOut(WorkerPool *workerPool)
connection->connectionState == MULTI_CONNECTION_INITIAL) connection->connectionState == MULTI_CONNECTION_INITIAL)
{ {
connection->connectionState = MULTI_CONNECTION_TIMED_OUT; connection->connectionState = MULTI_CONNECTION_TIMED_OUT;
IncrementStatCounterForMyDb(STAT_CONNECTION_ESTABLISHMENT_FAILED);
} }
} }
} }
@ -2914,7 +2899,7 @@ NextEventTimeout(DistributedExecution *execution)
long eventTimeout = 1000; /* milliseconds */ long eventTimeout = 1000; /* milliseconds */
WorkerPool *workerPool = NULL; WorkerPool *workerPool = NULL;
foreach_ptr(workerPool, execution->workerList) foreach_declared_ptr(workerPool, execution->workerList)
{ {
if (workerPool->failureState == WORKER_POOL_FAILED) if (workerPool->failureState == WORKER_POOL_FAILED)
{ {
@ -3027,6 +3012,10 @@ ConnectionStateMachine(WorkerSession *session)
* the state machines might have already progressed and used * the state machines might have already progressed and used
* new pools/sessions instead. That's why we terminate the * new pools/sessions instead. That's why we terminate the
* connection, clear any state associated with it. * connection, clear any state associated with it.
*
* Note that here we don't increment the failed connection
* stat counter because MarkEstablishingSessionsTimedOut()
* already did that.
*/ */
connection->connectionState = MULTI_CONNECTION_FAILED; connection->connectionState = MULTI_CONNECTION_FAILED;
break; break;
@ -3037,7 +3026,12 @@ ConnectionStateMachine(WorkerSession *session)
ConnStatusType status = PQstatus(connection->pgConn); ConnStatusType status = PQstatus(connection->pgConn);
if (status == CONNECTION_OK) if (status == CONNECTION_OK)
{ {
HandleMultiConnectionSuccess(session); /*
* Connection was already established, possibly a cached
* connection.
*/
bool newConnection = false;
HandleMultiConnectionSuccess(session, newConnection);
UpdateConnectionWaitFlags(session, UpdateConnectionWaitFlags(session,
WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE); WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE);
break; break;
@ -3045,6 +3039,7 @@ ConnectionStateMachine(WorkerSession *session)
else if (status == CONNECTION_BAD) else if (status == CONNECTION_BAD)
{ {
connection->connectionState = MULTI_CONNECTION_FAILED; connection->connectionState = MULTI_CONNECTION_FAILED;
IncrementStatCounterForMyDb(STAT_CONNECTION_ESTABLISHMENT_FAILED);
break; break;
} }
@ -3060,6 +3055,7 @@ ConnectionStateMachine(WorkerSession *session)
if (pollMode == PGRES_POLLING_FAILED) if (pollMode == PGRES_POLLING_FAILED)
{ {
connection->connectionState = MULTI_CONNECTION_FAILED; connection->connectionState = MULTI_CONNECTION_FAILED;
IncrementStatCounterForMyDb(STAT_CONNECTION_ESTABLISHMENT_FAILED);
} }
else if (pollMode == PGRES_POLLING_READING) else if (pollMode == PGRES_POLLING_READING)
{ {
@ -3077,7 +3073,12 @@ ConnectionStateMachine(WorkerSession *session)
} }
else else
{ {
HandleMultiConnectionSuccess(session); /*
* Connection was not established before (!= CONNECTION_OK)
* but PQconnectPoll() did so now.
*/
bool newConnection = true;
HandleMultiConnectionSuccess(session, newConnection);
UpdateConnectionWaitFlags(session, UpdateConnectionWaitFlags(session,
WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE); WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE);
@ -3155,6 +3156,11 @@ ConnectionStateMachine(WorkerSession *session)
break; break;
} }
/*
* Here we don't increment the connection stat counter for failed
* connections because we don't track the connections that we could
* establish but lost later.
*/
connection->connectionState = MULTI_CONNECTION_FAILED; connection->connectionState = MULTI_CONNECTION_FAILED;
break; break;
} }
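
Taken together, the comments above describe one accounting rule: a connection counts as a failed establishment exactly once, at the point where establishing it fails; timeouts are counted where they are detected (in MarkEstablishingSessionsTimedOut), and connections lost after they were established are never counted as establishment failures. A small sketch of that rule; the enum and helper below are illustrative only:

#include <stdbool.h>
#include <stdio.h>

typedef enum ConnFailureCause
{
	CONN_FAILED_WHILE_CONNECTING,   /* CONNECTION_BAD / PGRES_POLLING_FAILED */
	CONN_FAILED_AFTER_TIMEOUT,      /* already counted when marked timed out */
	CONN_LOST_AFTER_ESTABLISHMENT   /* never an establishment failure */
} ConnFailureCause;

static bool
CountsAsEstablishmentFailure(ConnFailureCause cause)
{
	return cause == CONN_FAILED_WHILE_CONNECTING;
}

int
main(void)
{
	printf("%d %d %d\n",
		   CountsAsEstablishmentFailure(CONN_FAILED_WHILE_CONNECTING),   /* 1 */
		   CountsAsEstablishmentFailure(CONN_FAILED_AFTER_TIMEOUT),      /* 0 */
		   CountsAsEstablishmentFailure(CONN_LOST_AFTER_ESTABLISHMENT)); /* 0 */
	return 0;
}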
@ -3317,12 +3323,12 @@ HasUnfinishedTaskForSession(WorkerSession *session)
* connection's state. * connection's state.
*/ */
static void static void
HandleMultiConnectionSuccess(WorkerSession *session) HandleMultiConnectionSuccess(WorkerSession *session, bool newConnection)
{ {
MultiConnection *connection = session->connection; MultiConnection *connection = session->connection;
WorkerPool *workerPool = session->workerPool; WorkerPool *workerPool = session->workerPool;
MarkConnectionConnected(connection); MarkConnectionConnected(connection, newConnection);
ereport(DEBUG4, (errmsg("established connection to %s:%d for " ereport(DEBUG4, (errmsg("established connection to %s:%d for "
"session %ld in %ld microseconds", "session %ld in %ld microseconds",
@ -3678,13 +3684,8 @@ UpdateConnectionWaitFlags(WorkerSession *session, int waitFlags)
return; return;
} }
#if PG_VERSION_NUM >= PG_VERSION_15
/* always detect closed sockets */ /* always detect closed sockets */
connection->waitFlags = waitFlags | WL_SOCKET_CLOSED; connection->waitFlags = waitFlags | WL_SOCKET_CLOSED;
#else
connection->waitFlags = waitFlags;
#endif
/* without signalling the execution, the flag changes won't be reflected */ /* without signalling the execution, the flag changes won't be reflected */
execution->waitFlagsChanged = true; execution->waitFlagsChanged = true;
@ -3709,13 +3710,11 @@ CheckConnectionReady(WorkerSession *session)
return false; return false;
} }
#if PG_VERSION_NUM >= PG_VERSION_15
if ((session->latestUnconsumedWaitEvents & WL_SOCKET_CLOSED) != 0) if ((session->latestUnconsumedWaitEvents & WL_SOCKET_CLOSED) != 0)
{ {
connection->connectionState = MULTI_CONNECTION_LOST; connection->connectionState = MULTI_CONNECTION_LOST;
return false; return false;
} }
#endif
/* try to send all pending data */ /* try to send all pending data */
int sendStatus = PQflush(connection->pgConn); int sendStatus = PQflush(connection->pgConn);
@ -3805,7 +3804,7 @@ PopAssignedPlacementExecution(WorkerSession *session)
/* /*
* PopAssignedPlacementExecution finds an executable task from the queue of assigned tasks. * PopUnassignedPlacementExecution finds an executable task from the queue of unassigned tasks.
*/ */
static TaskPlacementExecution * static TaskPlacementExecution *
PopUnassignedPlacementExecution(WorkerPool *workerPool) PopUnassignedPlacementExecution(WorkerPool *workerPool)
@ -4255,7 +4254,7 @@ WorkerPoolFailed(WorkerPool *workerPool)
} }
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList) foreach_declared_ptr(session, workerPool->sessionList)
{ {
WorkerSessionFailed(session); WorkerSessionFailed(session);
} }
@ -4280,7 +4279,7 @@ WorkerPoolFailed(WorkerPool *workerPool)
List *workerList = workerPool->distributedExecution->workerList; List *workerList = workerPool->distributedExecution->workerList;
WorkerPool *pool = NULL; WorkerPool *pool = NULL;
foreach_ptr(pool, workerList) foreach_declared_ptr(pool, workerList)
{ {
/* failed pools or pools without any connection attempts ignored */ /* failed pools or pools without any connection attempts ignored */
if (pool->failureState == WORKER_POOL_FAILED || if (pool->failureState == WORKER_POOL_FAILED ||
@ -4633,7 +4632,7 @@ PlacementExecutionReady(TaskPlacementExecution *placementExecution)
/* wake up an idle connection by checking whether the connection is writeable */ /* wake up an idle connection by checking whether the connection is writeable */
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, workerPool->sessionList) foreach_declared_ptr(session, workerPool->sessionList)
{ {
MultiConnection *connection = session->connection; MultiConnection *connection = session->connection;
RemoteTransaction *transaction = &(connection->remoteTransaction); RemoteTransaction *transaction = &(connection->remoteTransaction);
@ -4755,10 +4754,10 @@ BuildWaitEventSet(List *sessionList)
int eventSetSize = GetEventSetSize(sessionList); int eventSetSize = GetEventSetSize(sessionList);
WaitEventSet *waitEventSet = WaitEventSet *waitEventSet =
CreateWaitEventSet(CurrentMemoryContext, eventSetSize); CreateWaitEventSet(WaitEventSetTracker_compat, eventSetSize);
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, sessionList) foreach_declared_ptr(session, sessionList)
{ {
AddSessionToWaitEventSet(session, waitEventSet); AddSessionToWaitEventSet(session, waitEventSet);
} }
@ -4856,7 +4855,7 @@ static void
RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList) RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList)
{ {
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, sessionList) foreach_declared_ptr(session, sessionList)
{ {
MultiConnection *connection = session->connection; MultiConnection *connection = session->connection;
int waitEventSetIndex = session->waitEventSetIndex; int waitEventSetIndex = session->waitEventSetIndex;
@ -4912,7 +4911,7 @@ CleanUpSessions(DistributedExecution *execution)
/* always trigger wait event set in the first round */ /* always trigger wait event set in the first round */
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, sessionList) foreach_declared_ptr(session, sessionList)
{ {
MultiConnection *connection = session->connection; MultiConnection *connection = session->connection;
@ -4993,7 +4992,7 @@ static void
UnclaimAllSessionConnections(List *sessionList) UnclaimAllSessionConnections(List *sessionList)
{ {
WorkerSession *session = NULL; WorkerSession *session = NULL;
foreach_ptr(session, sessionList) foreach_declared_ptr(session, sessionList)
{ {
MultiConnection *connection = session->connection; MultiConnection *connection = session->connection;

View File

@ -43,8 +43,9 @@
#include "distributed/multi_executor.h" #include "distributed/multi_executor.h"
#include "distributed/multi_router_planner.h" #include "distributed/multi_router_planner.h"
#include "distributed/multi_server_executor.h" #include "distributed/multi_server_executor.h"
#include "distributed/query_stats.h"
#include "distributed/shard_utils.h" #include "distributed/shard_utils.h"
#include "distributed/stats/query_stats.h"
#include "distributed/stats/stat_counters.h"
#include "distributed/subplan_execution.h" #include "distributed/subplan_execution.h"
#include "distributed/worker_log_messages.h" #include "distributed/worker_log_messages.h"
#include "distributed/worker_protocol.h" #include "distributed/worker_protocol.h"
@ -206,7 +207,7 @@ CitusBeginScan(CustomScanState *node, EState *estate, int eflags)
if (distributedPlan->modifyQueryViaCoordinatorOrRepartition != NULL) if (distributedPlan->modifyQueryViaCoordinatorOrRepartition != NULL)
{ {
/* /*
* INSERT..SELECT via coordinator or re-partitioning are special because * INSERT..SELECT / MERGE via coordinator or re-partitioning are special because
* the SELECT part is planned separately. * the SELECT part is planned separately.
*/ */
return; return;
@ -262,8 +263,19 @@ CitusExecScan(CustomScanState *node)
if (!scanState->finishedRemoteScan) if (!scanState->finishedRemoteScan)
{ {
bool isMultiTaskPlan = IsMultiTaskPlan(scanState->distributedPlan);
AdaptiveExecutor(scanState); AdaptiveExecutor(scanState);
if (isMultiTaskPlan)
{
IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD);
}
else
{
IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD);
}
scanState->finishedRemoteScan = true; scanState->finishedRemoteScan = true;
} }
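
The plan shape is captured before AdaptiveExecutor() runs, and the matching per-database counter is bumped afterwards. A simplified, in-process sketch of that bookkeeping; the plain array and IncrementStat helper are illustrative, whereas the real IncrementStatCounterForMyDb presumably updates per-database counters kept in shared state:

#include <stdio.h>

typedef enum StatType
{
	STAT_QUERY_EXECUTION_SINGLE_SHARD = 0,
	STAT_QUERY_EXECUTION_MULTI_SHARD,
	STAT_COUNT
} StatType;

static long statCounters[STAT_COUNT];

static void
IncrementStat(StatType stat)
{
	statCounters[stat]++;   /* stand-in for the per-database counter bump */
}

static void
executeScan(int isMultiTaskPlan)
{
	/* decide which counter to bump based on the plan shape */
	IncrementStat(isMultiTaskPlan ? STAT_QUERY_EXECUTION_MULTI_SHARD
								  : STAT_QUERY_EXECUTION_SINGLE_SHARD);
}

int
main(void)
{
	executeScan(1);
	executeScan(0);
	executeScan(0);
	printf("single-shard: %ld, multi-shard: %ld\n",
		   statCounters[STAT_QUERY_EXECUTION_SINGLE_SHARD],
		   statCounters[STAT_QUERY_EXECUTION_MULTI_SHARD]);
	return 0;
}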
@ -524,7 +536,7 @@ static bool
AnchorShardsInTaskListExist(List *taskList) AnchorShardsInTaskListExist(List *taskList)
{ {
Task *task = NULL; Task *task = NULL;
foreach_ptr(task, taskList) foreach_declared_ptr(task, taskList)
{ {
if (!ShardExists(task->anchorShardId)) if (!ShardExists(task->anchorShardId))
{ {
@ -670,11 +682,13 @@ RegenerateTaskForFasthPathQuery(Job *workerJob)
} }
bool isLocalTableModification = false; bool isLocalTableModification = false;
bool delayedFastPath = false;
GenerateSingleShardRouterTaskList(workerJob, GenerateSingleShardRouterTaskList(workerJob,
relationShardList, relationShardList,
placementList, placementList,
shardId, shardId,
isLocalTableModification); isLocalTableModification,
delayedFastPath);
} }

Some files were not shown because too many files have changed in this diff