Merge remote-tracking branch 'origin/create_alter_database' into alter_database_additional_options

pull/7253/head
gindibay 2023-11-10 05:38:19 +03:00
commit b45543f51b
177 changed files with 2505 additions and 2357 deletions

File diff suppressed because it is too large Load Diff

View File

@ -127,72 +127,61 @@
},
"cffi": {
"hashes": [
"sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5",
"sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef",
"sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104",
"sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426",
"sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405",
"sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375",
"sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a",
"sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e",
"sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc",
"sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf",
"sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185",
"sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497",
"sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3",
"sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35",
"sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c",
"sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83",
"sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21",
"sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca",
"sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984",
"sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac",
"sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd",
"sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee",
"sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a",
"sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2",
"sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192",
"sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7",
"sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585",
"sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f",
"sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e",
"sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27",
"sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b",
"sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e",
"sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e",
"sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d",
"sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c",
"sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415",
"sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82",
"sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02",
"sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314",
"sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325",
"sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c",
"sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3",
"sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914",
"sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045",
"sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d",
"sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9",
"sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5",
"sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2",
"sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c",
"sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3",
"sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2",
"sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8",
"sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d",
"sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d",
"sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9",
"sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162",
"sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76",
"sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4",
"sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e",
"sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9",
"sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6",
"sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b",
"sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01",
"sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"
"sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc",
"sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a",
"sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417",
"sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab",
"sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520",
"sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36",
"sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743",
"sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8",
"sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed",
"sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684",
"sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56",
"sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324",
"sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d",
"sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235",
"sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e",
"sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088",
"sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000",
"sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7",
"sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e",
"sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673",
"sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c",
"sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe",
"sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2",
"sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098",
"sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8",
"sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a",
"sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0",
"sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b",
"sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896",
"sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e",
"sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9",
"sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2",
"sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b",
"sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6",
"sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404",
"sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f",
"sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0",
"sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4",
"sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc",
"sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936",
"sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba",
"sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872",
"sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb",
"sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614",
"sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1",
"sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d",
"sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969",
"sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b",
"sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4",
"sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627",
"sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956",
"sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"
],
"version": "==1.15.1"
"markers": "python_version >= '3.8'",
"version": "==1.16.0"
},
"click": {
"hashes": [
@ -420,78 +409,78 @@
"mitmproxy": {
"editable": true,
"git": "https://github.com/citusdata/mitmproxy.git",
"markers": "python_version >= '3.10'",
"markers": "python_version >= '3.9'",
"ref": "2fd18ef051b987925a36337ab1d61aa674353b44"
},
"msgpack": {
"hashes": [
"sha256:00ce5f827d4f26fc094043e6f08b6069c1b148efa2631c47615ae14fb6cafc89",
"sha256:04450e4b5e1e662e7c86b6aafb7c230af9334fd0becf5e6b80459a507884241c",
"sha256:099c3d8a027367e1a6fc55d15336f04ff65c60c4f737b5739f7db4525c65fe9e",
"sha256:102cfb54eaefa73e8ca1e784b9352c623524185c98e057e519545131a56fb0af",
"sha256:14db7e1b7a7ed362b2f94897bf2486c899c8bb50f6e34b2db92fe534cdab306f",
"sha256:159cfec18a6e125dd4723e2b1de6f202b34b87c850fb9d509acfd054c01135e9",
"sha256:1dc67b40fe81217b308ab12651adba05e7300b3a2ccf84d6b35a878e308dd8d4",
"sha256:1f0e36a5fa7a182cde391a128a64f437657d2b9371dfa42eda3436245adccbf5",
"sha256:229ccb6713c8b941eaa5cf13dc7478eba117f21513b5893c35e44483e2f0c9c8",
"sha256:25d3746da40f3c8c59c3b1d001e49fd2aa17904438f980d9a391370366df001e",
"sha256:32c0aff31f33033f4961abc01f78497e5e07bac02a508632aef394b384d27428",
"sha256:33bbf47ea5a6ff20c23426106e81863cdbb5402de1825493026ce615039cc99d",
"sha256:35ad5aed9b52217d4cea739d0ea3a492a18dd86fecb4b132668a69f27fb0363b",
"sha256:3910211b0ab20be3a38e0bb944ed45bd4265d8d9f11a3d1674b95b298e08dd5c",
"sha256:3b5658b1f9e486a2eec4c0c688f213a90085b9cf2fec76ef08f98fdf6c62f4b9",
"sha256:40b801b768f5a765e33c68f30665d3c6ee1c8623a2d2bb78e6e59f2db4e4ceb7",
"sha256:47275ff73005a3e5e146e50baa2378e1730cba6e292f0222bc496a8e4c4adfc8",
"sha256:55bb4a1bf94e39447bc08238a2fb8a767460388a8192f67c103442eb36920887",
"sha256:5b08676a17e3f791daad34d5fcb18479e9c85e7200d5a17cbe8de798643a7e37",
"sha256:5b16344032a27b2ccfd341f89dadf3e4ef6407d91e4b93563c14644a8abb3ad7",
"sha256:5c5e05e4f5756758c58a8088aa10dc70d851c89f842b611fdccfc0581c1846bc",
"sha256:5cd67674db3c73026e0a2c729b909780e88bd9cbc8184256f9567640a5d299a8",
"sha256:5e7fae9ca93258a956551708cf60dc6c8145574e32ce8c8c4d894e63bcb04341",
"sha256:61213482b5a387ead9e250e9e3cb290292feca39dc83b41c3b1b7b8ffc8d8ecb",
"sha256:619a63753ba9e792fe3c6c0fc2b9ee2cfbd92153dd91bee029a89a71eb2942cd",
"sha256:652e4b7497825b0af6259e2c54700e6dc33d2fc4ed92b8839435090d4c9cc911",
"sha256:68569509dd015fcdd1e6b2b3ccc8c51fd27d9a97f461ccc909270e220ee09685",
"sha256:6a01a072b2219b65a6ff74df208f20b2cac9401c60adb676ee34e53b4c651077",
"sha256:70843788c85ca385846a2d2f836efebe7bb2687ca0734648bf5c9dc6c55602d2",
"sha256:76820f2ece3b0a7c948bbb6a599020e29574626d23a649476def023cbb026787",
"sha256:7a006c300e82402c0c8f1ded11352a3ba2a61b87e7abb3054c845af2ca8d553c",
"sha256:7baf16fd8908a025c4a8d7b699103e72d41f967e2aee5a2065432bcdbd9fd06e",
"sha256:7ecf431786019a7bfedc28281531d706627f603e3691d64eccdbce3ecd353823",
"sha256:885de1ed5ea01c1bfe0a34c901152a264c3c1f8f1d382042b92ea354bd14bb0e",
"sha256:88cdb1da7fdb121dbb3116910722f5acab4d6e8bfcacab8fafe27e2e7744dc6a",
"sha256:95ade0bd4cf69e04e8b8f8ec2d197d9c9c4a9b6902e048dc7456bf6d82e12a80",
"sha256:9b88dc97ba86c96b964c3745a445d9a65f76fe21955a953064fe04adb63e9367",
"sha256:9c780d992f5d734432726b92a0c87bf1857c3d85082a8dea29cbf56e44a132b3",
"sha256:9f85200ea102276afdd3749ca94747f057bbb868d1c52921ee2446730b508d0f",
"sha256:a1cf98afa7ad5e7012454ca3fde254499a13f9d92fd50cb46118118a249a1355",
"sha256:a635aecf1047255576dbb0927cbf9a7aa4a68e9d54110cc3c926652d18f144e0",
"sha256:ae97504958d0bc58c1152045c170815d5c4f8af906561ce044b6358b43d0c97e",
"sha256:b06a5095a79384760625b5de3f83f40b3053a385fb893be8a106fbbd84c14980",
"sha256:b5c8dd9a386a66e50bd7fa22b7a49fb8ead2b3574d6bd69eb1caced6caea0803",
"sha256:bae6c561f11b444b258b1b4be2bdd1e1cf93cd1d80766b7e869a79db4543a8a8",
"sha256:bbb4448a05d261fae423d5c0b0974ad899f60825bc77eabad5a0c518e78448c2",
"sha256:bd6af61388be65a8701f5787362cb54adae20007e0cc67ca9221a4b95115583b",
"sha256:bf652839d16de91fe1cfb253e0a88db9a548796939533894e07f45d4bdf90a5f",
"sha256:d6d25b8a5c70e2334ed61a8da4c11cd9b97c6fbd980c406033f06e4463fda006",
"sha256:da057d3652e698b00746e47f06dbb513314f847421e857e32e1dc61c46f6c052",
"sha256:e0ed35d6d6122d0baa9a1b59ebca4ee302139f4cfb57dab85e4c73ab793ae7ed",
"sha256:e36560d001d4ba469d469b02037f2dd404421fd72277d9474efe9f03f83fced5",
"sha256:f4321692e7f299277e55f322329b2c972d93bb612d85f3fda8741bec5c6285ce",
"sha256:f75114c05ec56566da6b55122791cf5bb53d5aada96a98c016d6231e03132f76",
"sha256:fb4571efe86545b772a4630fee578c213c91cbcfd20347806e47fd4e782a18fe",
"sha256:fc97aa4b4fb928ff4d3b74da7c30b360d0cb3ede49a5a6e1fd9705f49aea1deb"
"sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862",
"sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d",
"sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3",
"sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672",
"sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0",
"sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9",
"sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee",
"sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46",
"sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524",
"sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819",
"sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc",
"sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc",
"sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1",
"sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82",
"sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81",
"sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6",
"sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d",
"sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2",
"sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c",
"sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87",
"sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84",
"sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e",
"sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95",
"sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f",
"sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b",
"sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93",
"sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf",
"sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61",
"sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c",
"sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8",
"sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d",
"sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c",
"sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4",
"sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba",
"sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415",
"sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee",
"sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d",
"sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9",
"sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075",
"sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f",
"sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7",
"sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681",
"sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329",
"sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1",
"sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf",
"sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c",
"sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5",
"sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b",
"sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5",
"sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e",
"sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b",
"sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad",
"sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd",
"sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7",
"sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002",
"sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc"
],
"markers": "python_version >= '3.8'",
"version": "==1.0.6"
"version": "==1.0.7"
},
"packaging": {
"hashes": [
"sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61",
"sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"
"sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5",
"sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"
],
"markers": "python_version >= '3.7'",
"version": "==23.1"
"version": "==23.2"
},
"passlib": {
"hashes": [
@ -698,6 +687,62 @@
"markers": "python_version >= '3'",
"version": "==0.17.16"
},
"ruamel.yaml.clib": {
"hashes": [
"sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d",
"sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001",
"sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462",
"sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9",
"sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b",
"sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b",
"sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615",
"sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15",
"sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b",
"sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9",
"sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675",
"sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1",
"sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899",
"sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7",
"sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7",
"sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312",
"sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa",
"sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f",
"sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91",
"sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa",
"sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b",
"sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3",
"sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334",
"sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5",
"sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3",
"sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe",
"sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3",
"sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed",
"sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337",
"sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880",
"sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d",
"sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248",
"sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d",
"sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279",
"sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf",
"sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512",
"sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069",
"sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb",
"sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942",
"sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d",
"sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31",
"sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92",
"sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd",
"sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5",
"sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28",
"sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d",
"sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1",
"sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2",
"sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875",
"sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"
],
"markers": "python_version < '3.10' and platform_python_implementation == 'CPython'",
"version": "==0.2.8"
},
"sortedcontainers": {
"hashes": [
"sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88",
@ -746,11 +791,12 @@
},
"werkzeug": {
"hashes": [
"sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8",
"sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528"
"sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc",
"sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==2.3.7"
"version": "==3.0.1"
},
"wsproto": {
"hashes": [
@ -906,11 +952,11 @@
},
"packaging": {
"hashes": [
"sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61",
"sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"
"sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5",
"sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"
],
"markers": "python_version >= '3.7'",
"version": "==23.1"
"version": "==23.2"
},
"pathspec": {
"hashes": [
@ -922,19 +968,19 @@
},
"platformdirs": {
"hashes": [
"sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d",
"sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"
"sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3",
"sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"
],
"markers": "python_version >= '3.7'",
"version": "==3.10.0"
"version": "==3.11.0"
},
"pycodestyle": {
"hashes": [
"sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0",
"sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8"
"sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f",
"sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"
],
"markers": "python_version >= '3.8'",
"version": "==2.11.0"
"version": "==2.11.1"
},
"pyflakes": {
"hashes": [

View File

@ -10,8 +10,13 @@ on:
required: false
default: false
type: boolean
push:
branches:
- "main"
- "release-*"
pull_request:
types: [opened, reopened,synchronize]
merge_group:
jobs:
# Since GHA does not interpolate env variables in matrix context, we need to
# define them in a separate job and use them in other jobs.
@ -27,9 +32,9 @@ jobs:
style_checker_image_name: "citus/stylechecker"
style_checker_tools_version: "0.8.18"
image_suffix: "-v9d71045"
pg14_version: "14.9"
pg15_version: "15.4"
pg16_version: "16.0"
pg14_version: '{ "major": "14", "full": "14.9" }'
pg15_version: '{ "major": "15", "full": "15.4" }'
pg16_version: '{ "major": "16", "full": "16.0" }'
upgrade_pg_versions: "14.9-15.4-16.0"
steps:
# Since GHA jobs needs at least one step we use a noop step here.
@ -93,7 +98,7 @@ jobs:
run: ci/check_migration_files.sh
build:
needs: params
name: Build for PG ${{ matrix.pg_version}}
name: Build for PG${{ fromJson(matrix.pg_version).major }}
strategy:
fail-fast: false
matrix:
@ -107,7 +112,7 @@ jobs:
- ${{ needs.params.outputs.pg16_version }}
runs-on: ubuntu-20.04
container:
image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ matrix.image_suffix }}"
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
options: --user root
steps:
- uses: actions/checkout@v3.5.0
@ -124,7 +129,7 @@ jobs:
./build-${{ env.PG_MAJOR }}/*
./install-${{ env.PG_MAJOR }}.tar
test-citus:
name: PG${{ matrix.pg_version }} - ${{ matrix.make }}
name: PG${{ fromJson(matrix.pg_version).major }} - ${{ matrix.make }}
strategy:
fail-fast: false
matrix:
@ -211,7 +216,7 @@ jobs:
image_name: ${{ needs.params.outputs.fail_test_image_name }}
runs-on: ubuntu-20.04
container:
image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}"
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
options: --user root --dns=8.8.8.8
# Because GitHub creates a default network for each job, we need to use
# --dns= to get DNS settings similar to our other CI systems or local
@ -228,17 +233,17 @@ jobs:
- uses: "./.github/actions/save_logs_and_results"
if: always()
with:
folder: ${{ matrix.pg_version }}_${{ matrix.make }}
folder: ${{ fromJson(matrix.pg_version).major }}_${{ matrix.make }}
- uses: "./.github/actions/upload_coverage"
if: always()
with:
flags: ${{ env.PG_MAJOR }}_${{ matrix.suite }}_${{ matrix.make }}
codecov_token: ${{ secrets.CODECOV_TOKEN }}
test-arbitrary-configs:
name: PG${{ matrix.pg_version }} - check-arbitrary-configs-${{ matrix.parallel }}
name: PG${{ fromJson(matrix.pg_version).major }} - check-arbitrary-configs-${{ matrix.parallel }}
runs-on: ["self-hosted", "1ES.Pool=1es-gha-citusdata-pool"]
container:
image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}"
image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}"
options: --user root
needs:
- params
@ -333,10 +338,10 @@ jobs:
flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
codecov_token: ${{ secrets.CODECOV_TOKEN }}
test-citus-upgrade:
name: PG${{ needs.params.outputs.pg14_version }} - check-citus-upgrade
name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade
runs-on: ubuntu-20.04
container:
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ needs.params.outputs.pg14_version }}${{ needs.params.outputs.image_suffix }}"
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}"
options: --user root
needs:
- params
@ -383,7 +388,7 @@ jobs:
CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
runs-on: ubuntu-20.04
container:
image: ${{ needs.params.outputs.test_image_name }}:${{ needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }}
image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
needs:
- params
- test-citus
@ -478,7 +483,7 @@ jobs:
name: Test flakyness
runs-on: ubuntu-20.04
container:
image: ${{ needs.params.outputs.fail_test_image_name }}:${{ needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }}
image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
options: --user root
env:
runs: 8
@ -492,7 +497,6 @@ jobs:
matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
steps:
- uses: actions/checkout@v3.5.0
- uses: actions/download-artifact@v3.0.1
- uses: "./.github/actions/setup_extension"
- name: Run minimal tests
run: |-
@ -501,7 +505,7 @@ jobs:
for test in "${tests_array[@]}"
do
test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/")
gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-whole-schedule-line
done
shell: bash
- uses: "./.github/actions/save_logs_and_results"

View File

@ -71,7 +71,7 @@ jobs:
- uses: "./.github/actions/setup_extension"
- name: Run minimal tests
run: |-
gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-base-schedule --use-whole-schedule-line
gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-whole-schedule-line
shell: bash
- uses: "./.github/actions/save_logs_and_results"
if: always()

View File

@ -3,6 +3,7 @@ name: Build tests in packaging images
on:
pull_request:
types: [opened, reopened,synchronize]
merge_group:
workflow_dispatch:
@ -24,9 +25,11 @@ jobs:
- name: Get Postgres Versions
id: get-postgres-versions
run: |
# Postgres versions are stored in .github/workflows/build_and_test.yml file in "pg[pg-version]_version"
# format. Below command extracts the versions and gets the unique values.
pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE 'pg[0-9]+_version: "[0-9.]+"' | sed -E 's/pg([0-9]+)_version: "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
set -euxo pipefail
# Postgres versions are stored in .github/workflows/build_and_test.yml
# file in json strings with major and full keys.
# Below command extracts the versions and gets the unique values.
pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' | sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',')
pg_versions_array="[ ${pg_versions} ]"
echo "Supported PG Versions: ${pg_versions_array}"
# Below line is needed to set the output variable to be used in the next job

View File

@ -1,3 +1,11 @@
### citus v12.1.1 (November 9, 2023) ###
* Fixes leaking of memory and memory contexts in Citus foreign key cache
(#7219)
* Makes sure to disallow creating a replicated distributed table concurrently
(#7236)
### citus v12.1.0 (September 12, 2023) ###
* Adds support for PostgreSQL 16.0 (#7173)

View File

@ -14,8 +14,8 @@ ci_scripts=$(
grep -v -E '^(ci_helpers.sh|fix_style.sh)$'
)
for script in $ci_scripts; do
if ! grep "\\bci/$script\\b" .circleci/config.yml > /dev/null; then
echo "ERROR: CI script with name \"$script\" is not actually used in .circleci/config.yml"
if ! grep "\\bci/$script\\b" -r .github > /dev/null; then
echo "ERROR: CI script with name \"$script\" is not actually used in .github folder"
exit 1
fi
if ! grep "^## \`$script\`\$" ci/README.md > /dev/null; then

View File

@ -1,96 +0,0 @@
#!/bin/bash
# Testing this script locally requires you to set the following environment
# variables:
# CIRCLE_BRANCH, GIT_USERNAME and GIT_TOKEN
# fail if trying to reference a variable that is not set.
set -u
# exit immediately if a command fails
set -e
# Fail on pipe failures
set -o pipefail
PR_BRANCH="${CIRCLE_BRANCH}"
ENTERPRISE_REMOTE="https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/citusdata/citus-enterprise"
# shellcheck disable=SC1091
source ci/ci_helpers.sh
# List executed commands. This is done so debugging this script is easier when
# it fails. It's explicitly done after git remote add so username and password
# are not shown in CI output (even though it's also filtered out by CircleCI)
set -x
check_compile () {
echo "INFO: checking if merged code can be compiled"
./configure --without-libcurl
make -j10
}
# Clone current git repo (which should be community) to a temporary working
# directory and go there
GIT_DIR_ROOT="$(git rev-parse --show-toplevel)"
TMP_GIT_DIR="$(mktemp --directory -t citus-merge-check.XXXXXXXXX)"
git clone "$GIT_DIR_ROOT" "$TMP_GIT_DIR"
cd "$TMP_GIT_DIR"
# Fails in CI without this
git config user.email "citus-bot@microsoft.com"
git config user.name "citus bot"
# Disable "set -x" temporarily, because $ENTERPRISE_REMOTE contains passwords
{ set +x ; } 2> /dev/null
git remote add enterprise "$ENTERPRISE_REMOTE"
set -x
git remote set-url --push enterprise no-pushing
# Fetch enterprise-master
git fetch enterprise enterprise-master
git checkout "enterprise/enterprise-master"
if git merge --no-commit "origin/$PR_BRANCH"; then
echo "INFO: community PR branch could be merged into enterprise-master"
# check that we can compile after the merge
if check_compile; then
exit 0
fi
echo "WARN: Failed to compile after community PR branch was merged into enterprise"
fi
# undo partial merge
git merge --abort
# If we have a conflict on enterprise merge on the master branch, we have a problem.
# Provide an error message to indicate that enterprise merge is needed to fix this check.
if [[ $PR_BRANCH = master ]]; then
echo "ERROR: Master branch has merge conflicts with enterprise-master."
echo "Try re-running this CI job after merging your changes into enterprise-master."
exit 1
fi
if ! git fetch enterprise "$PR_BRANCH" ; then
echo "ERROR: enterprise/$PR_BRANCH was not found and community PR branch could not be merged into enterprise-master"
exit 1
fi
# Show the top commit of the enterprise PR branch to make debugging easier
git log -n 1 "enterprise/$PR_BRANCH"
# Check that this branch contains the top commit of the current community PR
# branch. If it does not it means it's not up to date with the current PR, so
# the enterprise branch should be updated.
if ! git merge-base --is-ancestor "origin/$PR_BRANCH" "enterprise/$PR_BRANCH" ; then
echo "ERROR: enterprise/$PR_BRANCH is not up to date with community PR branch"
exit 1
fi
# Now check if we can merge the enterprise PR into enterprise-master without
# issues.
git merge --no-commit "enterprise/$PR_BRANCH"
# check that we can compile after the merge
check_compile

View File

@ -18,7 +18,7 @@
#include "lib/stringinfo.h"
#include "columnar/columnar_compression.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#if HAVE_CITUS_LIBLZ4
#include <lz4.h>

View File

@ -15,7 +15,7 @@
#include "access/table.h"
#include "catalog/pg_am.h"
#include "catalog/pg_type.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "miscadmin.h"
#include "storage/fd.h"
#include "storage/smgr.h"

View File

@ -1723,11 +1723,11 @@ Merge command the same principles as INSERT .. SELECT processing. However, due t
# DDL
DDL commands are primarily handled via the ProcessUtility hook, which gets the parse tree of the DDL command. For supported DDL commands, we always follow the same sequence of steps:
DDL commands are primarily handled via the citus_ProcessUtility hook, which gets the parse tree of the DDL command. For supported DDL commands, we always follow the same sequence of steps:
1. Qualify the table names in the parse tree (simplifies deparsing, avoids sensitivity to search_path changes)
2. Pre-process logic
3. Call original ProcessUtility to execute the command on the local shell table
3. Call original previous ProcessUtility to execute the command on the local shell table
4. Post-process logic
5. Execute command on all other nodes
6. Execute command on shards (in case of table DDL)
@ -1749,6 +1749,66 @@ The reason for handling dependencies and deparsing in post-process step is that
Not all table DDL is currently deparsed. In that case, the original command sent by the client is used. That is a shortcoming in our DDL logic that causes user-facing issues and should be addressed. We do not directly construct a separate DDL command for each shard. Instead, we call the `worker_apply_shard_ddl_command(shardid bigint, ddl_command text)` function which parses the DDL command, replaces the table names with shard names in the parse tree according to the shard ID, and then executes the command. That also has some shortcomings, because we cannot support more complex DDL commands in this manner (e.g. adding multiple foreign keys). Ideally, all DDL would be deparsed, and for table DDL the deparsed query string would have shard names, similar to regular queries.
`markDistributed` is used to indicate whether we add a record to `pg_dist_object` to mark the object as "distributed".
## Defining a new DDL command
All commands that are propagated by Citus should be defined in DistributeObjectOps struct. Below is a sample DistributeObjectOps for ALTER DATABASE command that is defined in [distribute_object_ops.c](commands/distribute_object_ops.c) file.
```c
static DistributeObjectOps Database_Alter = {
.deparse = DeparseAlterDatabaseStmt,
.qualify = NULL,
.preprocess = PreprocessAlterDatabaseStmt,
.postprocess = NULL,
.objectType = OBJECT_DATABASE,
.operationType = DIST_OPS_ALTER,
.address = NULL,
.markDistributed = false,
};
```
Each field in the struct is documented in the comments within the `DistributeObjectOps`. When defining a new DDL command, follow these guidelines:
- **Returning tasks for `preprocess` and `postprocess`**: Ensure that either `preprocess` or `postprocess` returns a list of "DDLJob"s. If both functions return non-empty lists, then you would get an assertion failure.
- **Generic `preprocess` and `postprocess` methods**: The generic methods, `PreprocessAlterDistributedObjectStmt` and `PostprocessAlterDistributedObjectStmt`, serve as generic pre and post methods utilized for various statements. Both of these methods find application in distributed object operations.
- The `PreprocessAlterDistributedObjectStmt` method carries out the following operations:
- Performs a qualification operation.
- Deparses the statement and generates a task list.
- As for the `PostprocessAlterDistributedObjectStmt` method, it:
- Invokes the `EnsureAllObjectDependenciesExistOnAllNodes` function to propagate missing dependencies, both on the coordinator and the worker.
- Before defining new `preprocess` or `postprocess` methods, it is advisable to assess whether the generic methods can be employed in your specific case.
- **`deparse`**: When propagating the command to worker nodes, make sure to define `deparse`. This is necessary because it generates a query string for each worker node.
- **`markDistributed`**: Set this flag to true if you want to add a record to the `pg_dist_object` table. This is particularly important for `CREATE` statements when introducing a new object to the system.
- **`address`**: If `markDistributed` is set to true, you must define the `address`. Failure to do so will result in a runtime error. The `address` is required to identify the fields that will be stored in the `pg_dist_object` table.
- **`markDistributed` usage in `DROP` Statements**: Please note that `markDistributed` does not apply to `DROP` statements. For `DROP` statements, instead you need to call `UnmarkObjectDistributed()` for the object either in `preprocess` or `postprocess`. Otherwise, state records in ``pg_dist_object`` table will cause errors in UDF calls such as ``citus_add_node()``, which will try to copy the non-existent db object.
- **`qualify`**: The `qualify` function is used to qualify the objects based on their schemas in the parse tree. It is employed to prevent sensitivity to changes in the `search_path` on worker nodes. Note that it is not mandatory to define this function for all DDL commands. It is only required for commands that involve objects that are bound to schemas, such as; tables, types, functions and so on.
After defining the `DistributeObjectOps` structure, this structure should be implemented in the `GetDistributeObjectOps()` function as shown below:
```c
// Example implementation in C code
const DistributeObjectOps *
GetDistributeObjectOps(Node *node)
{
switch (nodeTag(node))
{
case T_AlterDatabaseStmt:
{
return &Database_Alter;
}
...
```
## Object & dependency propagation

View File

@ -13,7 +13,7 @@
#include "postgres.h"
#include "funcapi.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "catalog/pg_proc.h"
#include "commands/defrem.h"

View File

@ -12,7 +12,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/xact.h"
#include "catalog/pg_constraint.h"

View File

@ -11,7 +11,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/backend_data.h"
#include "distributed/metadata_cache.h"

View File

@ -10,7 +10,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "commands/defrem.h"

View File

@ -27,7 +27,7 @@
#include "distributed/multi_executor.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/worker_create_or_replace.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/worker_manager.h"
#include "parser/parse_type.h"
#include "utils/builtins.h"

View File

@ -14,8 +14,10 @@
#include "postgres.h"
#include "catalog/objectaddress.h"
#include "catalog/pg_database.h"
#include "catalog/pg_ts_config.h"
#include "catalog/pg_ts_dict.h"
#include "commands/dbcommands.h"
#include "nodes/parsenodes.h"
#include "tcop/utility.h"
@ -28,8 +30,6 @@
#include "distributed/metadata/distobject.h"
#include "distributed/multi_executor.h"
#include "distributed/worker_transaction.h"
#include "catalog/pg_database.h"
#include "commands/dbcommands.h"
/*

View File

@ -11,7 +11,7 @@
#include "postgres.h"
#include "miscadmin.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/commands/utility_hook.h"
#include "access/genam.h"

View File

@ -10,52 +10,59 @@
*/
#include "postgres.h"
#include "miscadmin.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/objectaddress.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_database.h"
#include "catalog/pg_database_d.h"
#include "catalog/pg_tablespace.h"
#include "commands/dbcommands.h"
#include "miscadmin.h"
#include "nodes/parsenodes.h"
#include "utils/syscache.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
#include "utils/relcache.h"
#include "utils/syscache.h"
#include "distributed/adaptive_executor.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/deparse_shard_query.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"
#include "distributed/metadata_utility.h"
#include "distributed/multi_executor.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/worker_transaction.h"
#include "distributed/deparser.h"
#include "distributed/worker_protocol.h"
#include "distributed/metadata/distobject.h"
#include "distributed/deparse_shard_query.h"
#include "distributed/listutils.h"
#include "distributed/adaptive_executor.h"
#include "access/htup_details.h"
#include "catalog/pg_tablespace.h"
#include "access/heapam.h"
#include "utils/relcache.h"
#include "utils/rel.h"
#include "utils/lsyscache.h"
#include "catalog/pg_collation.h"
#include "utils/relcache.h"
#include "catalog/pg_database_d.h"
#include "distributed/worker_transaction.h"
/*
* DatabaseCollationInfo is used to store collation related information of a database
*/
typedef struct DatabaseCollationInfo
{
char *collation;
char *ctype;
#if PG_VERSION_NUM >= PG_VERSION_15
char *icu_locale;
char *collversion;
#endif
} DatabaseCollationInfo;
static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid);
PG_FUNCTION_INFO_V1(citus_internal_database_command);
static Oid get_database_owner(Oid db_oid);
List * PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
/* controlled via GUC */
bool EnableCreateDatabasePropagation = true;
bool EnableCreateDatabasePropagation = false;
bool EnableAlterDatabaseOwner = true;
/*
@ -306,7 +313,6 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
return NIL;
}
AlterDatabaseSetStmt *stmt = castNode(AlterDatabaseSetStmt, node);
EnsureCoordinator();
@ -321,6 +327,13 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
}
/*
* PostprocessAlterDatabaseStmt is executed before the statement is applied to the local
* postgres instance.
*
* In this stage, we can perform validations and prepare the commands that need to
* be run on all workers to grant.
*/
List *
PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
@ -361,82 +374,21 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString)
(void *) createDatabaseCommand,
ENABLE_DDL_PROPAGATION);
return NontransactionalNodeDDLTask(NON_COORDINATOR_NODES, commands);
return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* citus_internal_database_command is an internal UDF to
* create/drop a database in an idempotent maner without
* transaction block restrictions.
* PostprocessAlterDatabaseStmt is executed after the statement is applied to the local
* postgres instance. In this stage we can prepare the commands that need to be run on
* all workers to drop the database. Since the DROP DATABASE statement gives error in
* transaction context, we need to use NontransactionalNodeDDLTaskList to send the
* DROP DATABASE statement to the workers.
*/
Datum
citus_internal_database_command(PG_FUNCTION_ARGS)
{
int saveNestLevel = NewGUCNestLevel();
text *commandText = PG_GETARG_TEXT_P(0);
char *command = text_to_cstring(commandText);
Node *parseTree = ParseTreeNode(command);
set_config_option("citus.enable_ddl_propagation", "off",
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
set_config_option("citus.enable_create_database_propagation", "off",
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
/*
* createdb() / DropDatabase() uses ParseState to report the error position for the
* input command and the position is reported to be 0 when it's provided as NULL.
* We're okay with that because we don't expect this UDF to be called with an incorrect
* DDL command.
*
*/
ParseState *pstate = NULL;
if (IsA(parseTree, CreatedbStmt))
{
CreatedbStmt *stmt = castNode(CreatedbStmt, parseTree);
bool missingOk = true;
Oid databaseOid = get_database_oid(stmt->dbname, missingOk);
if (!OidIsValid(databaseOid))
{
createdb(pstate, (CreatedbStmt *) parseTree);
}
}
else if (IsA(parseTree, DropdbStmt))
{
DropdbStmt *stmt = castNode(DropdbStmt, parseTree);
bool missingOk = false;
Oid databaseOid = get_database_oid(stmt->dbname, missingOk);
if (OidIsValid(databaseOid))
{
DropDatabase(pstate, (DropdbStmt *) parseTree);
}
}
else
{
ereport(ERROR, (errmsg("unsupported command type %d", nodeTag(parseTree))));
}
/* Below command rollbacks flags to the state before this session*/
AtEOXact_GUC(true, saveNestLevel);
PG_RETURN_VOID();
}
List *
PreprocessDropDatabaseStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext)
{
bool isPostProcess = false;
if (!EnableCreateDatabasePropagation || !ShouldPropagate())
{
return NIL;
@ -446,41 +398,50 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString,
DropdbStmt *stmt = (DropdbStmt *) node;
bool isPostProcess = false;
List *addresses = GetObjectAddressListFromParseTree(node, stmt->missing_ok,
isPostProcess);
if (list_length(addresses) == 0)
if (list_length(addresses) != 1)
{
return NIL;
ereport(ERROR, (errmsg("unexpected number of objects found when "
"executing DROP DATABASE command")));
}
ObjectAddress *address = (ObjectAddress *) linitial(addresses);
if (address->objectId == InvalidOid || !IsObjectDistributed(address))
if (address->objectId == InvalidOid || !IsAnyObjectDistributed(list_make1(address)))
{
return NIL;
}
char *dropDatabaseCommand = DeparseTreeNode(node);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
(void *) dropDatabaseCommand,
ENABLE_DDL_PROPAGATION);
return NontransactionalNodeDDLTask(NON_COORDINATOR_NODES, commands);
return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* GetDatabaseAddressFromDatabaseName gets the database name and returns the ObjectAddress
* of the database.
*/
static ObjectAddress *
GetDatabaseAddressFromDatabaseName(char *databaseName, bool missingOk)
{
Oid databaseOid = get_database_oid(databaseName, missingOk);
ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*dbAddress, DatabaseRelationId, databaseOid);
return dbAddress;
ObjectAddress *dbObjectAddress = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*dbObjectAddress, DatabaseRelationId, databaseOid);
return dbObjectAddress;
}
/*
* DropDatabaseStmtObjectAddress gets the ObjectAddress of the database that is the
* object of the DropdbStmt.
*/
List *
DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
{
@ -491,6 +452,10 @@ DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
}
/*
* CreateDatabaseStmtObjectAddress gets the ObjectAddress of the database that is the
* object of the CreatedbStmt.
*/
List *
CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
{
@ -501,6 +466,9 @@ CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess)
}
/*
* GetTablespaceName gets the tablespace oid and returns the tablespace name.
*/
static char *
GetTablespaceName(Oid tablespaceOid)
{
@ -519,19 +487,6 @@ GetTablespaceName(Oid tablespaceOid)
}
/*
* DatabaseCollationInfo is used to store collation related information of a database
*/
typedef struct DatabaseCollationInfo
{
char *collation;
char *ctype;
#if PG_VERSION_NUM >= PG_VERSION_15
char *icu_locale;
char *collversion;
#endif
} DatabaseCollationInfo;
/*
* GetDatabaseCollation gets oid of a database and returns all the collation related information
* We need this method since collation related info in Form_pg_database is not accessible
@ -605,6 +560,9 @@ GetDatabaseCollation(Oid db_oid)
}
/*
* FreeDatabaseCollationInfo frees the memory allocated for DatabaseCollationInfo
*/
static void
FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo)
{
@ -626,8 +584,13 @@ FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo)
#if PG_VERSION_NUM >= PG_VERSION_15
/*
* GetLocaleProviderString gets the datlocprovider stored in pg_database
* and returns the string representation of the datlocprovider
*/
static char *
get_locale_provider_string(char datlocprovider)
GetLocaleProviderString(char datlocprovider)
{
switch (datlocprovider)
{
@ -656,7 +619,8 @@ get_locale_provider_string(char datlocprovider)
/*
* GenerateCreateDatabaseStatementFromPgDatabase is gets the pg_database tuple and returns the CREATE DATABASE statement
* GenerateCreateDatabaseStatementFromPgDatabase gets the pg_database tuple and returns the
* CREATE DATABASE statement that can be used to create given database.
*/
static char *
GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
@ -666,66 +630,64 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
StringInfoData str;
initStringInfo(&str);
appendStringInfo(&str, "CREATE DATABASE %s", quote_identifier(NameStr(
databaseForm->
datname)));
appendStringInfo(&str, "CREATE DATABASE %s",
quote_identifier(NameStr(databaseForm->datname)));
if (databaseForm->datdba != InvalidOid)
{
appendStringInfo(&str, " OWNER = %s", GetUserNameFromId(databaseForm->datdba,
false));
appendStringInfo(&str, " OWNER = %s",
quote_literal_cstr(GetUserNameFromId(databaseForm->datdba,false)));
}
if (databaseForm->encoding != -1)
{
appendStringInfo(&str, " ENCODING = '%s'", pg_encoding_to_char(
databaseForm->encoding));
appendStringInfo(&str, " ENCODING = %s",
quote_literal_cstr(pg_encoding_to_char(databaseForm->encoding)));
}
if (collInfo.collation != NULL)
{
appendStringInfo(&str, " LC_COLLATE = '%s'", collInfo.collation);
appendStringInfo(&str, " LC_COLLATE = %s", quote_literal_cstr(collInfo.collation));
}
if (collInfo.ctype != NULL)
{
appendStringInfo(&str, " LC_CTYPE = '%s'", collInfo.ctype);
appendStringInfo(&str, " LC_CTYPE = %s", quote_literal_cstr(collInfo.ctype));
}
#if PG_VERSION_NUM >= PG_VERSION_15
if (collInfo.icu_locale != NULL)
{
appendStringInfo(&str, " ICU_LOCALE = '%s'", collInfo.icu_locale);
appendStringInfo(&str, " ICU_LOCALE = %s", quote_literal_cstr(collInfo.icu_locale));
}
if (databaseForm->datlocprovider != 0)
{
appendStringInfo(&str, " LOCALE_PROVIDER = '%s'", get_locale_provider_string(
databaseForm->datlocprovider));
appendStringInfo(&str, " LOCALE_PROVIDER = %s",
quote_literal_cstr(GetLocaleProviderString(databaseForm->datlocprovider)));
}
if (collInfo.collversion != NULL)
{
appendStringInfo(&str, " COLLATION_VERSION = '%s'", collInfo.collversion);
appendStringInfo(&str, " COLLATION_VERSION = %s", quote_literal_cstr(collInfo.collversion));
}
#endif
if (databaseForm->dattablespace != InvalidOid)
{
appendStringInfo(&str, " TABLESPACE = %s", quote_identifier(GetTablespaceName(
databaseForm->
dattablespace)));
appendStringInfo(&str, " TABLESPACE = %s",
quote_identifier(GetTablespaceName(databaseForm->dattablespace)));
}
appendStringInfo(&str, " ALLOW_CONNECTIONS = '%s'", databaseForm->datallowconn ?
"true" : "false");
appendStringInfo(&str, " ALLOW_CONNECTIONS = %s",
quote_literal_cstr(databaseForm->datallowconn ?"true" : "false"));
if (databaseForm->datconnlimit >= 0)
{
appendStringInfo(&str, " CONNECTION LIMIT %d", databaseForm->datconnlimit);
}
appendStringInfo(&str, " IS_TEMPLATE = '%s'", databaseForm->datistemplate ? "true" :
"false");
appendStringInfo(&str, " IS_TEMPLATE = %s",
quote_literal_cstr(databaseForm->datistemplate ? "true" :"false"));
FreeDatabaseCollationInfo(collInfo);
@ -735,19 +697,21 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm)
/*
* GenerateCreateDatabaseCommandList is gets the pg_database tuples and returns the CREATE DATABASE statement list
* for all the databases in the cluster.citus_internal_database_command UDF is used to send the CREATE DATABASE
* statement to the workers since the CREATE DATABASE statement gives error in transaction context.
* GenerateCreateDatabaseCommandList gets a list of pg_database tuples and returns
* a list of CREATE DATABASE statements for all the databases.
*
* Commands in the list are wrapped by citus_internal_database_command() UDF
* to avoid from transaction block restrictions that apply to database commands
*/
List *
GenerateCreateDatabaseCommandList(void)
{
List *commands = NIL;
HeapTuple tuple;
Relation pgDatabaseRel = table_open(DatabaseRelationId, AccessShareLock);
TableScanDesc scan = table_beginscan_catalog(pgDatabaseRel, 0, NULL);
HeapTuple tuple = NULL;
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Form_pg_database databaseForm = (Form_pg_database) GETSTRUCT(tuple);
@ -759,7 +723,7 @@ GenerateCreateDatabaseCommandList(void)
/* Generate the CREATE DATABASE statement */
appendStringInfo(outerDbStmt,
"select pg_catalog.citus_internal_database_command( %s)",
"SELECT pg_catalog.citus_internal_database_command(%s)",
quote_literal_cstr(
createStmt));

View File

@ -40,14 +40,14 @@ static char * DropTableIfExistsCommand(Oid relationId);
/*
* EnsureDependenciesExistOnAllNodes finds all the dependencies that we support and makes
* sure these are available on all workers. If not available they will be created on the
* workers via a separate session that will be committed directly so that the objects are
* sure these are available on all nodes. If not available they will be created on the
* nodes via a separate session that will be committed directly so that the objects are
* visible to potentially multiple sessions creating the shards.
*
* Note; only the actual objects are created via a separate session, the records to
* pg_dist_object are created in this session. As a side effect the objects could be
* created on the workers without a catalog entry. Updates to the objects on the coordinator
* are not propagated to the workers until the record is visible on the coordinator.
* created on the nodes without a catalog entry. Updates to the objects on local node
* are not propagated to the remote nodes until the record is visible on local node.
*
* This is solved by creating the dependencies in an idempotent manner, either via
* postgres native CREATE IF NOT EXISTS, or citus helper functions.
@ -95,7 +95,7 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
* either get it now, or get it in citus_add_node after this transaction finishes and
* the pg_dist_object record becomes visible.
*/
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(RowShareLock);
List *remoteNodeList = ActivePrimaryRemoteNodeList(RowShareLock);
/*
* Lock dependent objects explicitly to make sure same DDL command won't be sent
@ -127,12 +127,12 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
*/
if (HasAnyDependencyInPropagatedObjects(target))
{
SendCommandListToWorkersWithMetadata(ddlCommands);
SendCommandListToRemoteNodesWithMetadata(ddlCommands);
}
else
{
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
foreach_ptr(workerNode, remoteNodeList)
{
const char *nodeName = workerNode->workerName;
uint32 nodePort = workerNode->workerPort;
@ -144,8 +144,8 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
}
/*
* We do this after creating the objects on the workers, we make sure
* that objects have been created on worker nodes before marking them
* We do this after creating the objects on remote nodes, we make sure
* that objects have been created on remote nodes before marking them
* distributed, so MarkObjectDistributed wouldn't fail.
*/
foreach_ptr(dependency, dependenciesWithCommands)

View File

@ -14,7 +14,7 @@
#include "distributed/commands.h"
#include "distributed/deparser.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/version_compat.h"
#include "distributed/commands/utility_hook.h"

View File

@ -12,7 +12,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/htup_details.h"
#include "access/sysattr.h"

View File

@ -21,7 +21,7 @@
#include "miscadmin.h"
#include "funcapi.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/genam.h"
#include "access/htup_details.h"
@ -978,7 +978,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
char *argmodes = NULL;
int insertorderbyat = -1;
int argsprinted = 0;
int inputargno = 0;
HeapTuple proctup = SearchSysCache1(PROCOID, funcOid);
if (!HeapTupleIsValid(proctup))
@ -1058,7 +1057,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
}
}
inputargno++; /* this is a 1-based counter */
if (argsprinted == insertorderbyat)
{
appendStringInfoString(&buf, " ORDER BY ");

View File

@ -10,7 +10,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/genam.h"
#include "access/htup_details.h"
#include "access/xact.h"
@ -180,6 +180,8 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand,
return NIL;
}
EnsureCoordinator();
if (createIndexStatement->idxname == NULL)
{
/*

View File

@ -52,7 +52,7 @@
#include <netinet/in.h> /* for htons */
#include <string.h>
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/htup_details.h"
#include "access/htup.h"

View File

@ -175,7 +175,6 @@ BuildCreatePublicationStmt(Oid publicationId)
PUBLICATION_PART_ROOT :
PUBLICATION_PART_LEAF);
Oid relationId = InvalidOid;
int citusTableCount PG_USED_FOR_ASSERTS_ONLY = 0;
/* mainly for consistent ordering in test output */
relationIds = SortList(relationIds, CompareOids);
@ -199,11 +198,6 @@ BuildCreatePublicationStmt(Oid publicationId)
createPubStmt->tables = lappend(createPubStmt->tables, rangeVar);
#endif
if (IsCitusTable(relationId))
{
citusTableCount++;
}
}
/* WITH (publish_via_partition_root = true) option */

View File

@ -12,7 +12,7 @@
#include "pg_version_compat.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/heapam.h"
#include "access/htup_details.h"
@ -65,6 +65,7 @@ static DefElem * makeDefElemBool(char *name, bool value);
static List * GenerateRoleOptionsList(HeapTuple tuple);
static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options);
static List * GenerateGrantRoleStmtsOfRole(Oid roleid);
static void EnsureSequentialModeForRoleDDL(void);
static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple,
TupleDesc DbRoleSettingDescription);
@ -155,7 +156,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)
return NIL;
}
EnsureCoordinator();
EnsurePropagationToCoordinator();
AlterRoleStmt *stmt = castNode(AlterRoleStmt, node);
@ -184,7 +185,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)
(void *) CreateAlterRoleIfExistsCommand(stmt),
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
return NodeDDLTaskList(REMOTE_NODES, commands);
}
@ -230,7 +231,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString,
return NIL;
}
EnsureCoordinator();
EnsurePropagationToCoordinator();
QualifyTreeNode((Node *) stmt);
const char *sql = DeparseTreeNode((Node *) stmt);
@ -239,7 +240,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
return NodeDDLTaskList(REMOTE_NODES, commandList);
}
@ -909,7 +910,8 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString,
return NIL;
}
EnsureCoordinator();
EnsurePropagationToCoordinator();
EnsureSequentialModeForRoleDDL();
LockRelationOid(DistNodeRelationId(), RowShareLock);
@ -944,7 +946,7 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString,
commands = lappend(commands, ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
return NodeDDLTaskList(REMOTE_NODES, commands);
}
@ -1040,7 +1042,8 @@ PreprocessDropRoleStmt(Node *node, const char *queryString,
return NIL;
}
EnsureCoordinator();
EnsurePropagationToCoordinator();
EnsureSequentialModeForRoleDDL();
@ -1052,7 +1055,7 @@ PreprocessDropRoleStmt(Node *node, const char *queryString,
sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
return NodeDDLTaskList(REMOTE_NODES, commands);
}
@ -1129,7 +1132,7 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString,
return NIL;
}
EnsureCoordinator();
EnsurePropagationToCoordinator();
GrantRoleStmt *stmt = castNode(GrantRoleStmt, node);
List *allGranteeRoles = stmt->grantee_roles;
@ -1169,7 +1172,7 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString,
sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
return NodeDDLTaskList(REMOTE_NODES, commands);
}
@ -1180,11 +1183,13 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString,
List *
PostprocessGrantRoleStmt(Node *node, const char *queryString)
{
if (!EnableCreateRolePropagation || !IsCoordinator() || !ShouldPropagate())
if (!EnableCreateRolePropagation || !ShouldPropagate())
{
return NIL;
}
EnsurePropagationToCoordinator();
GrantRoleStmt *stmt = castNode(GrantRoleStmt, node);
RoleSpec *role = NULL;
@ -1332,7 +1337,7 @@ PreprocessAlterRoleRenameStmt(Node *node, const char *queryString,
Assert(stmt->renameType == OBJECT_ROLE);
EnsureCoordinator();
EnsurePropagationToCoordinator();
char *sql = DeparseTreeNode((Node *) stmt);
@ -1340,7 +1345,7 @@ PreprocessAlterRoleRenameStmt(Node *node, const char *queryString,
(void *) sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
return NodeDDLTaskList(REMOTE_NODES, commands);
}

View File

@ -17,7 +17,7 @@
#include "commands/defrem.h"
#include "distributed/commands.h"
#include "distributed/connection_management.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/version_compat.h"
#include "libpq-fe.h"
#include "nodes/parsenodes.h"

View File

@ -9,7 +9,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/genam.h"
#include "access/htup_details.h"
#include "access/xact.h"

View File

@ -9,7 +9,7 @@
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/genam.h"
#include "access/table.h"

View File

@ -43,7 +43,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/genam.h"
#include "access/htup_details.h"

View File

@ -25,7 +25,8 @@
*-------------------------------------------------------------------------
*/
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "postgres.h"
#include "miscadmin.h"
@ -35,6 +36,7 @@
#include "access/htup_details.h"
#include "catalog/catalog.h"
#include "catalog/dependency.h"
#include "catalog/pg_database.h"
#include "citus_version.h"
#include "commands/dbcommands.h"
#include "commands/defrem.h"
@ -62,6 +64,7 @@
#include "distributed/multi_executor.h"
#include "distributed/multi_explain.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/pg_version_constants.h"
#include "distributed/reference_table_utils.h"
#include "distributed/resource_lock.h"
#include "distributed/string_utils.h"
@ -80,7 +83,6 @@
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "catalog/pg_database.h"
bool EnableDDLPropagation = true; /* ddl propagation is enabled */
@ -579,7 +581,6 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
PreprocessLockStatement((LockStmt *) parsetree, context);
}
/*
* We only process ALTER TABLE ... ATTACH PARTITION commands in the function below
* and distribute the partition if necessary.
@ -710,9 +711,9 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
}
else if (IsA(parsetree, CreateRoleStmt) && !EnableCreateRolePropagation)
{
ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to worker"
ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to other"
" nodes"),
errhint("Connect to worker nodes directly to manually create all"
errhint("Connect to other nodes directly to manually create all"
" necessary users and roles.")));
}
@ -726,12 +727,13 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt,
}
/*
* Make sure that dropping the role and database deletes the pg_dist_object entries. There is a
* separate logic for roles and database, since roles and database are not included as dropped objects in the
* drop event trigger. To handle it both on worker and coordinator nodes, it is not
* implemented as a part of process functions but here.
* Make sure that dropping node-wide objects deletes the pg_dist_object
* entries. There is a separate logic for node-wide objects (such as role
* and databases), since they are not included as dropped objects in the
* drop event trigger. To handle it both on worker and coordinator nodes,
* it is not implemented as a part of process functions but here.
*/
UnmarkRolesAndDatabaseDistributed(parsetree);
UnmarkNodeWideObjectsDistributed(parsetree);
pstmt->utilityStmt = parsetree;
@ -1098,16 +1100,17 @@ IsDropSchemaOrDB(Node *parsetree)
* each shard placement and COMMIT/ROLLBACK is handled by
* CoordinatedTransactionCallback function.
*
* The function errors out if the node is not the coordinator or if the DDL is on
* a partitioned table which has replication factor > 1.
*
* The function errors out if the DDL is on a partitioned table which has replication
* factor > 1, or if the the coordinator is not added into metadata and we're on a
* worker node because we want to make sure that distributed DDL jobs are executed
* on the coordinator node too. See EnsurePropagationToCoordinator() for more details.
*/
void
ExecuteDistributedDDLJob(DDLJob *ddlJob)
{
bool shouldSyncMetadata = false;
EnsureCoordinator();
EnsurePropagationToCoordinator();
ObjectAddress targetObjectAddress = ddlJob->targetObjectAddress;
@ -1131,23 +1134,24 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
{
if (shouldSyncMetadata)
{
SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION);
SendCommandToRemoteNodesWithMetadata(DISABLE_DDL_PROPAGATION);
char *currentSearchPath = CurrentSearchPath();
/*
* Given that we're relaying the query to the worker nodes directly,
* Given that we're relaying the query to the remote nodes directly,
* we should set the search path exactly the same when necessary.
*/
if (currentSearchPath != NULL)
{
SendCommandToWorkersWithMetadata(
SendCommandToRemoteNodesWithMetadata(
psprintf("SET LOCAL search_path TO %s;", currentSearchPath));
}
if (ddlJob->metadataSyncCommand != NULL)
{
SendCommandToWorkersWithMetadata((char *) ddlJob->metadataSyncCommand);
SendCommandToRemoteNodesWithMetadata(
(char *) ddlJob->metadataSyncCommand);
}
}
@ -1226,7 +1230,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
char *currentSearchPath = CurrentSearchPath();
/*
* Given that we're relaying the query to the worker nodes directly,
* Given that we're relaying the query to the remote nodes directly,
* we should set the search path exactly the same when necessary.
*/
if (currentSearchPath != NULL)
@ -1238,7 +1242,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
commandList = lappend(commandList, (char *) ddlJob->metadataSyncCommand);
SendBareCommandListToMetadataWorkers(commandList);
SendBareCommandListToRemoteMetadataNodes(commandList);
}
}
PG_CATCH();
@ -1265,10 +1269,12 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob)
{
ereport(WARNING,
(errmsg(
"Commands that are not transaction-safe may result in partial failure"
", potentially leading to an inconsistent state.\nIf the problematic command"
" is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the "
"object,\nif applicable, and then reattempt the original command.")));
"Commands that are not transaction-safe may result in "
"partial failure, potentially leading to an inconsistent "
"state.\nIf the problematic command is a CREATE operation, "
"consider using the 'IF EXISTS' syntax to drop the object,"
"\nif applicable, and then re-attempt the original command.")));
PG_RE_THROW();
}
}
@ -1483,12 +1489,12 @@ DDLTaskList(Oid relationId, const char *commandString)
/*
* NontransactionalNodeDDLTask builds a list of tasks to execute a DDL command on a
* NontransactionalNodeDDLTaskList builds a list of tasks to execute a DDL command on a
* given target set of nodes with cannotBeExecutedInTransaction is set to make sure
* that list is being executed without a transaction.
* that task list is executed outside a transaction block.
*/
List *
NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands)
NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands)
{
List *ddlJobs = NodeDDLTaskList(targets, commands);
DDLJob *ddlJob = NULL;

View File

@ -10,7 +10,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "commands/defrem.h"
#include "commands/vacuum.h"
@ -184,7 +184,6 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
CitusVacuumParams vacuumParams)
{
int relationIndex = 0;
int executedVacuumCount = 0;
Oid relationId = InvalidOid;
foreach_oid(relationId, relationIdList)
@ -197,7 +196,6 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
/* local execution is not implemented for VACUUM commands */
bool localExecutionSupported = false;
ExecuteUtilityTaskList(taskList, localExecutionSupported);
executedVacuumCount++;
}
relationIndex++;
}

View File

@ -33,7 +33,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "miscadmin.h"
@ -302,8 +302,8 @@ EnsureConnectionPossibilityForRemotePrimaryNodes(void)
* seem to cause any problems as none of the placements that we are
* going to access would be on the new node.
*/
List *primaryNodeList = ActivePrimaryRemoteNodeList(NoLock);
EnsureConnectionPossibilityForNodeList(primaryNodeList);
List *remoteNodeList = ActivePrimaryRemoteNodeList(NoLock);
EnsureConnectionPossibilityForNodeList(remoteNodeList);
}

View File

@ -11,7 +11,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/hash.h"
#include "distributed/colocation_utils.h"

View File

@ -13,7 +13,7 @@
#include "postgres.h"
#include "pgstat.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "libpq-fe.h"

View File

@ -1,56 +1,66 @@
/*
* citus_deparseutils.c
* ---------------------
*
* This file contains common functions used for deparsing PostgreSQL statements
* to their equivalent SQL representation.
*
*/
#include "postgres.h"
#include "utils/builtins.h"
#include "commands/defrem.h"
#include "distributed/deparser.h"
#include "distributed/pg_version_constants.h"
#include "utils/builtins.h"
#include "utils/elog.h"
#include "utils/rel.h"
#include "utils/relcache.h"
#include "utils/syscache.h"
#include "utils/typcache.h"
#include "distributed/deparser.h"
#include "distributed/pg_version_constants.h"
/**
* Convert a DefElem option to a SQL statement and append it to the given StringInfo buffer.
* DefElemOptionToStatement converts a DefElem option to a SQL statement and
* appends it to the given StringInfo buffer.
*
* @param buf The StringInfo buffer to append the SQL statement to.
* @param option The DefElem option to convert to a SQL statement.
* @param opt_formats The option format specification to use for the conversion.
* @param num_opt_formats The number of option formats in the opt_formats array.
* @param optionFormats The option format specification to use for the conversion.
* @param optionFormatsLen The number of option formats in the opt_formats array.
*/
void
optionToStatement(StringInfo buf, DefElem *option, const struct
option_format *opt_formats, int
opt_formats_len)
DefElemOptionToStatement(StringInfo buf, DefElem *option, const
DefElemOptionFormat *optionFormats, int
optionFormatsLen)
{
const char *name = option->defname;
int i;
for (i = 0; i < opt_formats_len; i++)
for (i = 0; i < optionFormatsLen; i++)
{
if (strcmp(name, opt_formats[i].name) == 0)
if (strcmp(name, optionFormats[i].name) == 0)
{
switch (opt_formats[i].type)
switch (optionFormats[i].type)
{
case OPTION_FORMAT_STRING:
{
char *value = defGetString(option);
appendStringInfo(buf, opt_formats[i].format, quote_identifier(value));
appendStringInfo(buf, optionFormats[i].format, quote_identifier(
value));
break;
}
case OPTION_FORMAT_INTEGER:
{
int32 value = defGetInt32(option);
appendStringInfo(buf, opt_formats[i].format, value);
appendStringInfo(buf, optionFormats[i].format, value);
break;
}
case OPTION_FORMAT_BOOLEAN:
{
bool value = defGetBoolean(option);
appendStringInfo(buf, opt_formats[i].format, value ? "true" :
appendStringInfo(buf, optionFormats[i].format, value ? "true" :
"false");
break;
}
@ -59,7 +69,7 @@ optionToStatement(StringInfo buf, DefElem *option, const struct
case OPTION_FORMAT_OBJECT_ID:
{
Oid value = defGetObjectId(option);
appendStringInfo(buf, opt_formats[i].format, value);
appendStringInfo(buf, optionFormats[i].format, value);
break;
}
@ -67,14 +77,14 @@ optionToStatement(StringInfo buf, DefElem *option, const struct
case OPTION_FORMAT_LITERAL_CSTR:
{
char *value = defGetString(option);
appendStringInfo(buf, opt_formats[i].format, quote_literal_cstr(
appendStringInfo(buf, optionFormats[i].format, quote_literal_cstr(
value));
break;
}
default:
{
elog(ERROR, "unrecognized option type: %d", opt_formats[i].type);
elog(ERROR, "unrecognized option type: %d", optionFormats[i].type);
break;
}
}

View File

@ -10,7 +10,7 @@
#include "postgres.h"
#include "miscadmin.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include <stddef.h>

View File

@ -12,24 +12,24 @@
#include "postgres.h"
#include "pg_version_compat.h"
#include "catalog/namespace.h"
#include "lib/stringinfo.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"
#include "distributed/deparser.h"
#include "distributed/citus_ruleutils.h"
#include "commands/defrem.h"
#include "distributed/deparser.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "distributed/log_utils.h"
#include "parser/parse_type.h"
#include "distributed/listutils.h"
static void AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt);
static void AppendAlterDatabaseStmt(StringInfo buf, AlterDatabaseStmt *stmt);
const struct option_format create_database_option_formats[] = {
const DefElemOptionFormat create_database_option_formats[] = {
{ "owner", " OWNER %s", OPTION_FORMAT_STRING },
{ "template", " TEMPLATE %s", OPTION_FORMAT_STRING },
{ "encoding", " ENCODING %s", OPTION_FORMAT_LITERAL_CSTR },
@ -49,12 +49,29 @@ const struct option_format create_database_option_formats[] = {
};
const struct option_format alter_database_option_formats[] = {
const DefElemOptionFormat alter_database_option_formats[] = {
{ "is_template", " IS_TEMPLATE %s", OPTION_FORMAT_BOOLEAN },
{ "allow_connections", " ALLOW_CONNECTIONS %s", OPTION_FORMAT_BOOLEAN },
{ "connection_limit", " CONNECTION LIMIT %d", OPTION_FORMAT_INTEGER },
};
/*
* DeparseAlterDatabaseOwnerStmt
* Deparse an AlterDatabaseOwnerStmt node
*
* This function is responsible for producing a string representation of an
* AlterDatabaseOwnerStmt node, which represents an ALTER DATABASE statement
* that changes the owner of a database. The output string includes the ALTER
* DATABASE keyword, the name of the database being altered, and the new owner
* of the database.
*
* Parameters:
* - node: a pointer to the AlterDatabaseOwnerStmt node to be deparsed
*
* Returns:
* - a string representation of the ALTER DATABASE statement
*/
char *
DeparseAlterDatabaseOwnerStmt(Node *node)
{
@ -70,6 +87,15 @@ DeparseAlterDatabaseOwnerStmt(Node *node)
}
/*
*
* AppendAlterDatabaseOwnerStmt
* Append an ALTER DATABASE statement for changing the owner of a database to the given StringInfo buffer.
*
* Parameters:
* - buf: The StringInfo buffer to append the statement to.
* - stmt: The AlterOwnerStmt representing the ALTER DATABASE statement to append.
*/
static void
AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt)
{
@ -256,6 +282,34 @@ DeparseAlterDatabaseSetStmt(Node *node)
}
/*
* Validates for if option is template, lc_type, locale or lc_collate, propagation will
* not be supported since template and strategy options are not stored in the catalog
* and lc_type, locale and lc_collate options depends on template parameter.
*/
static void
ValidateCreateDatabaseOptions(DefElem *option)
{
if (strcmp(option->defname, "strategy") == 0){
ereport(ERROR,
errmsg("CREATE DATABASE option \"%s\" is not supported",
option->defname));
}
char *optionValue = defGetString(option);
if (strcmp(option->defname,"template") == 0 && strcmp(optionValue, "template1") != 0)
{
ereport(ERROR,errmsg("Only template1 is supported as template parameter for CREATE DATABASE"));
}
}
/*
* Prepares a CREATE DATABASE statement with given empty StringInfo buffer and CreatedbStmt node.
*/
static void
AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt)
{
@ -267,27 +321,18 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt)
foreach_ptr(option, stmt->options)
{
/*If option is template, lc_type, locale or lc_collate, propagation will not be supportted */
/* since template database is not stored in the catalog */
if (strcmp(option->defname, "template") == 0 ||
strcmp(option->defname, "strategy") == 0 ||
strcmp(option->defname, "lc_ctype") == 0 ||
strcmp(option->defname, "locale") == 0 ||
strcmp(option->defname, "lc_collate") == 0 ||
strcmp(option->defname, "icu_locale") == 0 ||
strcmp(option->defname, "locale_provider") == 0)
{
ereport(ERROR,
errmsg("CREATE DATABASE option \"%s\" is not supported",
option->defname));
}
ValidateCreateDatabaseOptions(option);
optionToStatement(buf, option, create_database_option_formats, lengthof(
create_database_option_formats));
DefElemOptionToStatement(buf, option, create_database_option_formats,
lengthof(create_database_option_formats));
}
}
/*
* Converts a CreatedbStmt structure into a SQL command string.
* Used in the deparsing of Create database statement.
*/
char *
DeparseCreateDatabaseStmt(Node *node)
{
@ -301,13 +346,16 @@ DeparseCreateDatabaseStmt(Node *node)
}
/*
* Prepares a DROP DATABASE statement with given empty StringInfo buffer and DropdbStmt node.
*/
static void
AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt)
{
char *if_exists_statement = stmt->missing_ok ? "IF EXISTS" : "";
char *ifExistsStatement = stmt->missing_ok ? "IF EXISTS" : "";
appendStringInfo(buf,
"DROP DATABASE %s %s",
if_exists_statement,
ifExistsStatement,
quote_identifier(stmt->dbname));
DefElem *option = NULL;
@ -328,6 +376,10 @@ AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt)
}
/*
* Converts a DropdbStmt structure into a SQL command string.
* Used in the deparsing of drop database statement.
*/
char *
DeparseDropDatabaseStmt(Node *node)
{

View File

@ -12,7 +12,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "catalog/namespace.h"
#include "distributed/citus_ruleutils.h"

View File

@ -14,7 +14,7 @@
* This needs to be closely in sync with the core code.
*-------------------------------------------------------------------------
*/
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "pg_config.h"

View File

@ -14,7 +14,7 @@
* This needs to be closely in sync with the core code.
*-------------------------------------------------------------------------
*/
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "pg_config.h"

View File

@ -14,7 +14,7 @@
* This needs to be closely in sync with the core code.
*-------------------------------------------------------------------------
*/
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "pg_config.h"

View File

@ -9,7 +9,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "miscadmin.h"

View File

@ -8,7 +8,7 @@
*-------------------------------------------------------------------------
*/
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include <sys/stat.h>
#include <unistd.h>

View File

@ -78,7 +78,7 @@
#include "postgres.h"
#include "miscadmin.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/adaptive_executor.h"
#include "distributed/commands/utility_hook.h"
@ -567,7 +567,7 @@ LogLocalCommand(Task *task)
*
* One slightly different case is modifications to replicated tables
* (e.g., reference tables) where a single task ends in two separate tasks
* and the local task is added to localTaskList and the remaning ones to
* and the local task is added to localTaskList and the remaining ones to
* the remoteTaskList.
*/
void

View File

@ -10,7 +10,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "miscadmin.h"

View File

@ -15,7 +15,7 @@
#include "miscadmin.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/hash.h"
#include "catalog/pg_authid.h"

View File

@ -11,7 +11,7 @@
#include "postgres.h"
#include "distributed/commands.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/genam.h"
#include "access/heapam.h"

View File

@ -10,7 +10,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "miscadmin.h"
@ -55,6 +55,7 @@
static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress);
static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes,
Datum *paramValues);
static bool IsObjectDistributed(const ObjectAddress *address);
PG_FUNCTION_INFO_V1(citus_unmark_object_distributed);
PG_FUNCTION_INFO_V1(master_unmark_object_distributed);
@ -150,7 +151,7 @@ ObjectExists(const ObjectAddress *address)
/*
* MarkObjectDistributed marks an object as a distributed object. Marking is done
* by adding appropriate entries to citus.pg_dist_object and also marking the object
* as distributed by opening a connection using current user to all of the workers
* as distributed by opening a connection using current user to all remote nodes
* with metadata if object propagation is on.
*
* This function should be used if the user creating the given object. If you want
@ -165,7 +166,7 @@ MarkObjectDistributed(const ObjectAddress *distAddress)
{
char *workerPgDistObjectUpdateCommand =
CreatePgDistObjectEntryCommand(distAddress);
SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand);
SendCommandToRemoteNodesWithMetadata(workerPgDistObjectUpdateCommand);
}
}
@ -173,7 +174,7 @@ MarkObjectDistributed(const ObjectAddress *distAddress)
/*
* MarkObjectDistributedViaSuperUser marks an object as a distributed object. Marking
* is done by adding appropriate entries to citus.pg_dist_object and also marking the
* object as distributed by opening a connection using super user to all of the workers
* object as distributed by opening a connection using super user to all remote nodes
* with metadata if object propagation is on.
*
* This function should be used to mark dependent object as distributed. If you want
@ -188,7 +189,7 @@ MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress)
{
char *workerPgDistObjectUpdateCommand =
CreatePgDistObjectEntryCommand(distAddress);
SendCommandToWorkersWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand);
SendCommandToRemoteNodesWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand);
}
}
@ -358,8 +359,12 @@ ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes,
}
/*
* Deletes all pg_dist_object records for distributed roles in `DROP ROLE` statement a
* and for all databases in `DROP DATABASE` statement
*/
void
UnmarkRolesAndDatabaseDistributed(Node *node)
UnmarkNodeWideObjectsDistributed(Node *node)
{
if (IsA(node, DropRoleStmt))
{
@ -378,9 +383,9 @@ UnmarkRolesAndDatabaseDistributed(Node *node)
char *dbName = stmt->dbname;
Oid dbOid = get_database_oid(dbName, stmt->missing_ok);
ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*dbAddress, DatabaseRelationId, dbOid);
UnmarkObjectDistributed(dbAddress);
ObjectAddress *dbObjectAddress = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*dbObjectAddress, DatabaseRelationId, dbOid);
UnmarkObjectDistributed(dbObjectAddress);
}
}
@ -420,7 +425,7 @@ UnmarkObjectDistributed(const ObjectAddress *address)
* IsObjectDistributed returns if the object addressed is already distributed in the
* cluster. This performs a local indexed lookup in pg_dist_object.
*/
bool
static bool
IsObjectDistributed(const ObjectAddress *address)
{
ScanKeyData key[3];

View File

@ -8,7 +8,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "pg_version_compat.h"
#include "stdint.h"

View File

@ -30,12 +30,15 @@
#include "catalog/pg_attrdef.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_database.h"
#include "catalog/pg_database_d.h"
#include "catalog/pg_depend.h"
#include "catalog/pg_foreign_server.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "commands/async.h"
#include "commands/dbcommands.h"
#include "distributed/argutils.h"
#include "distributed/backend_data.h"
#include "distributed/citus_ruleutils.h"
@ -134,7 +137,7 @@ static bool ShouldSkipMetadataChecks(void);
static void EnsurePartitionMetadataIsSane(Oid relationId, char distributionMethod,
int colocationId, char replicationModel,
Var *distributionKey);
static void EnsureCoordinatorInitiatedOperation(void);
static void EnsureCitusInitiatedOperation(void);
static void EnsureShardMetadataIsSane(Oid relationId, int64 shardId, char storageType,
text *shardMinValue,
text *shardMaxValue);
@ -179,6 +182,7 @@ PG_FUNCTION_INFO_V1(citus_internal_delete_colocation_metadata);
PG_FUNCTION_INFO_V1(citus_internal_add_tenant_schema);
PG_FUNCTION_INFO_V1(citus_internal_delete_tenant_schema);
PG_FUNCTION_INFO_V1(citus_internal_update_none_dist_table_metadata);
PG_FUNCTION_INFO_V1(citus_internal_database_command);
static bool got_SIGTERM = false;
@ -1001,7 +1005,7 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
/*
* Ensure given distributionArgumentIndex and colocationId values are
@ -3090,7 +3094,7 @@ citus_internal_add_partition_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
if (distributionMethod == DISTRIBUTE_BY_NONE && distributionColumnVar != NULL)
{
@ -3206,7 +3210,7 @@ citus_internal_delete_partition_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
}
DeletePartitionRow(relationId);
@ -3254,7 +3258,7 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
/*
* Even if the table owner is a malicious user and the shard metadata is
@ -3272,19 +3276,13 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS)
/*
* EnsureCoordinatorInitiatedOperation is a helper function which ensures that
* the execution is initiated by the coordinator on a worker node.
* EnsureCitusInitiatedOperation is a helper function which ensures that
* the execution is initiated by Citus.
*/
static void
EnsureCoordinatorInitiatedOperation(void)
EnsureCitusInitiatedOperation(void)
{
/*
* We are restricting the operation to only MX workers with the local group id
* check. The other two checks are to ensure that the operation is initiated
* by the coordinator.
*/
if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend()) ||
GetLocalGroupId() == COORDINATOR_GROUP_ID)
if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend()))
{
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("This is an internal Citus function can only be "
@ -3465,7 +3463,7 @@ citus_internal_delete_placement_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
}
DeleteShardPlacementRow(placementId);
@ -3513,7 +3511,7 @@ citus_internal_add_placement_metadata_internal(int64 shardId, int64 shardLength,
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
/*
* Even if the table owner is a malicious user, as long as the shard placements
@ -3608,7 +3606,7 @@ citus_internal_update_placement_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
if (!ShardExists(shardId))
{
@ -3672,7 +3670,7 @@ citus_internal_delete_shard_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
if (!ShardExists(shardId))
{
@ -3715,7 +3713,7 @@ citus_internal_update_relation_colocation(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
/* ensure that the table is in pg_dist_partition */
char partitionMethod = PartitionMethodViaCatalog(relationId);
@ -3781,7 +3779,7 @@ citus_internal_add_colocation_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
}
InsertColocationGroupLocally(colocationId, shardCount, replicationFactor,
@ -3806,7 +3804,7 @@ citus_internal_delete_colocation_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
/* this UDF is not allowed allowed for executing as a separate command */
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
}
DeleteColocationGroupLocally(colocationId);
@ -3885,7 +3883,7 @@ citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS)
if (!ShouldSkipMetadataChecks())
{
EnsureCoordinatorInitiatedOperation();
EnsureCitusInitiatedOperation();
}
UpdateNoneDistTableMetadata(relationId, replicationModel,
@ -3895,6 +3893,80 @@ citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS)
}
/*
 * citus_internal_database_command is an internal UDF to
 * create/drop a database in an idempotent manner without
 * transaction block restrictions.
 *
 * The single text argument is a CREATE DATABASE / DROP DATABASE command;
 * any other statement type is rejected with an error. Propagation GUCs are
 * disabled locally so the command is applied only on this node.
 */
Datum
citus_internal_database_command(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
if (!ShouldSkipMetadataChecks())
{
EnsureCoordinatorInitiatedOperation();
}
PG_ENSURE_ARGNOTNULL(0, "database command");
text *commandText = PG_GETARG_TEXT_P(0);
char *command = text_to_cstring(commandText);
Node *parseTree = ParseTreeNode(command);
/*
 * Locally turn off Citus propagation for the scope of this call so the
 * database command is executed only on this node and not re-propagated.
 * The GUC changes are scoped to this nest level and rolled back below.
 */
int saveNestLevel = NewGUCNestLevel();
set_config_option("citus.enable_ddl_propagation", "off",
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
set_config_option("citus.enable_create_database_propagation", "off",
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
/*
 * createdb() / DropDatabase() uses ParseState to report the error position for the
 * input command and the position is reported to be 0 when it's provided as NULL.
 * We're okay with that because we don't expect this UDF to be called with an incorrect
 * DDL command.
 */
ParseState *pstate = NULL;
if (IsA(parseTree, CreatedbStmt))
{
CreatedbStmt *stmt = castNode(CreatedbStmt, parseTree);
/* idempotency: only create the database if it does not already exist */
bool missingOk = true;
Oid databaseOid = get_database_oid(stmt->dbname, missingOk);
if (!OidIsValid(databaseOid))
{
createdb(pstate, (CreatedbStmt *) parseTree);
}
}
else if (IsA(parseTree, DropdbStmt))
{
DropdbStmt *stmt = castNode(DropdbStmt, parseTree);
/*
 * NOTE(review): missingOk is false here, so get_database_oid() errors out
 * when the database is absent — that looks asymmetric with the CREATE
 * branch's idempotency; presumably stmt->missing_ok (or true) was
 * intended. Confirm against callers before changing.
 */
bool missingOk = false;
Oid databaseOid = get_database_oid(stmt->dbname, missingOk);
if (OidIsValid(databaseOid))
{
DropDatabase(pstate, (DropdbStmt *) parseTree);
}
}
else
{
ereport(ERROR, (errmsg("unsupported command type %d", nodeTag(parseTree))));
}
/* roll back the GUC changes made above to their pre-call values */
AtEOXact_GUC(true, saveNestLevel);
PG_RETURN_VOID();
}
/*
* SyncNewColocationGroup synchronizes a new pg_dist_colocation entry to a worker.
*/
@ -4503,7 +4575,7 @@ PropagateNodeWideObjectsCommandList(void)
if (EnableCreateDatabasePropagation)
{
/* Get commands for database creation */
/* get commands for database creation */
List *createDatabaseCommands = GenerateCreateDatabaseCommandList();
ddlCommands = list_concat(ddlCommands, createDatabaseCommands);
}

View File

@ -17,13 +17,14 @@
#include "libpq-fe.h"
#include "miscadmin.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/genam.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
#include "access/xact.h"
#include "catalog/dependency.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_constraint.h"
@ -88,11 +89,11 @@ static uint64 * AllocateUint64(uint64 value);
static void RecordDistributedRelationDependencies(Oid distributedRelationId);
static GroupShardPlacement * TupleToGroupShardPlacement(TupleDesc tupleDesc,
HeapTuple heapTuple);
static bool DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType,
bool failOnError, uint64 *tableSize);
static bool DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
SizeQueryType sizeQueryType, bool failOnError,
uint64 *tableSize);
static bool DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType,
bool failOnError, uint64 *relationSize);
static bool DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId,
SizeQueryType sizeQueryType, bool failOnError,
uint64 *relationSize);
static List * ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId);
static char * GenerateShardIdNameValuesForShardList(List *shardIntervalList,
bool firstValue);
@ -282,7 +283,7 @@ citus_shard_sizes(PG_FUNCTION_ARGS)
/*
* citus_total_relation_size accepts a table name and returns a distributed table
* citus_total_relation_size accepts a distributed table name and returns a distributed table
* and its indexes' total relation size.
*/
Datum
@ -294,20 +295,20 @@ citus_total_relation_size(PG_FUNCTION_ARGS)
bool failOnError = PG_GETARG_BOOL(1);
SizeQueryType sizeQueryType = TOTAL_RELATION_SIZE;
uint64 tableSize = 0;
uint64 relationSize = 0;
if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize))
if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize))
{
Assert(!failOnError);
PG_RETURN_NULL();
}
PG_RETURN_INT64(tableSize);
PG_RETURN_INT64(relationSize);
}
/*
* citus_table_size accepts a table name and returns a distributed table's total
* citus_table_size accepts a distributed table name and returns a distributed table's total
* relation size.
*/
Datum
@ -318,21 +319,24 @@ citus_table_size(PG_FUNCTION_ARGS)
Oid relationId = PG_GETARG_OID(0);
bool failOnError = true;
SizeQueryType sizeQueryType = TABLE_SIZE;
uint64 tableSize = 0;
uint64 relationSize = 0;
if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize))
/* We do not check if relation is really a table, like PostgreSQL is doing. */
if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize))
{
Assert(!failOnError);
PG_RETURN_NULL();
}
PG_RETURN_INT64(tableSize);
PG_RETURN_INT64(relationSize);
}
/*
* citus_relation_size accept a table name and returns a relation's 'main'
* citus_relation_size accept a distributed relation name and returns a relation's 'main'
* fork's size.
*
* Input relation is allowed to be an index on a distributed table too.
*/
Datum
citus_relation_size(PG_FUNCTION_ARGS)
@ -344,7 +348,7 @@ citus_relation_size(PG_FUNCTION_ARGS)
SizeQueryType sizeQueryType = RELATION_SIZE;
uint64 relationSize = 0;
if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &relationSize))
if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize))
{
Assert(!failOnError);
PG_RETURN_NULL();
@ -506,13 +510,16 @@ ReceiveShardIdAndSizeResults(List *connectionList, Tuplestorestate *tupleStore,
/*
* DistributedTableSize is helper function for each kind of citus size functions.
* It first checks whether the table is distributed and size query can be run on
* it. Connection to each node has to be established to get the size of the table.
* DistributedRelationSize is helper function for each kind of citus size
* functions. It first checks whether the relation is a distributed table or an
* index belonging to a distributed table and size query can be run on it.
* Connection to each node has to be established to get the size of the
* relation.
* Input relation is allowed to be an index on a distributed table too.
*/
static bool
DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnError,
uint64 *tableSize)
DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType,
bool failOnError, uint64 *relationSize)
{
int logLevel = WARNING;
@ -538,7 +545,7 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr
if (relation == NULL)
{
ereport(logLevel,
(errmsg("could not compute table size: relation does not exist")));
(errmsg("could not compute relation size: relation does not exist")));
return false;
}
@ -553,8 +560,9 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr
{
uint64 relationSizeOnNode = 0;
bool gotSize = DistributedTableSizeOnWorker(workerNode, relationId, sizeQueryType,
failOnError, &relationSizeOnNode);
bool gotSize = DistributedRelationSizeOnWorker(workerNode, relationId,
sizeQueryType,
failOnError, &relationSizeOnNode);
if (!gotSize)
{
return false;
@ -563,21 +571,22 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr
sumOfSizes += relationSizeOnNode;
}
*tableSize = sumOfSizes;
*relationSize = sumOfSizes;
return true;
}
/*
* DistributedTableSizeOnWorker gets the workerNode and relationId to calculate
* DistributedRelationSizeOnWorker gets the workerNode and relationId to calculate
* size of that relation on the given workerNode by summing up the size of each
* shard placement.
* Input relation is allowed to be an index on a distributed table too.
*/
static bool
DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
SizeQueryType sizeQueryType,
bool failOnError, uint64 *tableSize)
DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId,
SizeQueryType sizeQueryType,
bool failOnError, uint64 *relationSize)
{
int logLevel = WARNING;
@ -591,6 +600,17 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
uint32 connectionFlag = 0;
PGresult *result = NULL;
/* if the relation is an index, update relationId and define indexId */
Oid indexId = InvalidOid;
Oid relKind = get_rel_relkind(relationId);
if (relKind == RELKIND_INDEX || relKind == RELKIND_PARTITIONED_INDEX)
{
indexId = relationId;
bool missingOk = false;
relationId = IndexGetRelation(indexId, missingOk);
}
List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode, relationId);
/*
@ -598,21 +618,22 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
* But citus size functions shouldn't include them, like PG.
*/
bool optimizePartitionCalculations = false;
StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(
StringInfo relationSizeQuery = GenerateSizeQueryOnMultiplePlacements(
shardIntervalsOnNode,
indexId,
sizeQueryType,
optimizePartitionCalculations);
MultiConnection *connection = GetNodeConnection(connectionFlag, workerNodeName,
workerNodePort);
int queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data,
int queryResult = ExecuteOptionalRemoteCommand(connection, relationSizeQuery->data,
&result);
if (queryResult != 0)
{
ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("could not connect to %s:%d to get size of "
"table \"%s\"",
"relation \"%s\"",
workerNodeName, workerNodePort,
get_rel_name(relationId))));
@ -626,19 +647,19 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
ClearResults(connection, failOnError);
ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("cannot parse size of table \"%s\" from %s:%d",
errmsg("cannot parse size of relation \"%s\" from %s:%d",
get_rel_name(relationId), workerNodeName,
workerNodePort)));
return false;
}
StringInfo tableSizeStringInfo = (StringInfo) linitial(sizeList);
char *tableSizeString = tableSizeStringInfo->data;
StringInfo relationSizeStringInfo = (StringInfo) linitial(sizeList);
char *relationSizeString = relationSizeStringInfo->data;
if (strlen(tableSizeString) > 0)
if (strlen(relationSizeString) > 0)
{
*tableSize = SafeStringToUint64(tableSizeString);
*relationSize = SafeStringToUint64(relationSizeString);
}
else
{
@ -647,7 +668,7 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId,
* being executed. For this case we get an empty string as table size.
* We can take that as zero to prevent any unnecessary errors.
*/
*tableSize = 0;
*relationSize = 0;
}
PQclear(result);
@ -732,7 +753,7 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId)
/*
* GenerateSizeQueryOnMultiplePlacements generates a select size query to get
* size of multiple tables. Note that, different size functions supported by PG
* size of multiple relations. Note that, different size functions supported by PG
* are also supported by this function changing the size query type given as the
* last parameter to function. Depending on the sizeQueryType enum parameter, the
* generated query will call one of the functions: pg_relation_size,
@ -740,9 +761,13 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId)
* This function uses UDFs named worker_partitioned_*_size for partitioned tables,
* if the parameter optimizePartitionCalculations is true. The UDF to be called is
* determined by the parameter sizeQueryType.
*
* indexId is provided if we're interested in the size of an index, not the whole
* table.
*/
StringInfo
GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
Oid indexId,
SizeQueryType sizeQueryType,
bool optimizePartitionCalculations)
{
@ -766,16 +791,20 @@ GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList,
*/
continue;
}
/* we need to build the shard relation name, being an index or table */
Oid objectId = OidIsValid(indexId) ? indexId : shardInterval->relationId;
uint64 shardId = shardInterval->shardId;
Oid schemaId = get_rel_namespace(shardInterval->relationId);
Oid schemaId = get_rel_namespace(objectId);
char *schemaName = get_namespace_name(schemaId);
char *shardName = get_rel_name(shardInterval->relationId);
char *shardName = get_rel_name(objectId);
AppendShardIdToName(&shardName, shardId);
char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName);
char *quotedShardName = quote_literal_cstr(shardQualifiedName);
/* for partitoned tables, we will call worker_partitioned_... size functions */
/* for partitioned tables, we will call worker_partitioned_... size functions */
if (optimizePartitionCalculations && PartitionedTable(shardInterval->relationId))
{
partitionedShardNames = lappend(partitionedShardNames, quotedShardName);
@ -1010,7 +1039,7 @@ AppendShardIdNameValues(StringInfo selectQuery, ShardInterval *shardInterval)
/*
* ErrorIfNotSuitableToGetSize determines whether the table is suitable to find
* ErrorIfNotSuitableToGetSize determines whether the relation is suitable to find
* its' size with internal functions.
*/
static void
@ -1018,11 +1047,32 @@ ErrorIfNotSuitableToGetSize(Oid relationId)
{
if (!IsCitusTable(relationId))
{
char *relationName = get_rel_name(relationId);
char *escapedQueryString = quote_literal_cstr(relationName);
ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("cannot calculate the size because relation %s is not "
"distributed", escapedQueryString)));
Oid relKind = get_rel_relkind(relationId);
if (relKind != RELKIND_INDEX && relKind != RELKIND_PARTITIONED_INDEX)
{
char *relationName = get_rel_name(relationId);
char *escapedRelationName = quote_literal_cstr(relationName);
ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg(
"cannot calculate the size because relation %s "
"is not distributed",
escapedRelationName)));
}
bool missingOk = false;
Oid indexId = relationId;
relationId = IndexGetRelation(relationId, missingOk);
if (!IsCitusTable(relationId))
{
char *tableName = get_rel_name(relationId);
char *escapedTableName = quote_literal_cstr(tableName);
char *indexName = get_rel_name(indexId);
char *escapedIndexName = quote_literal_cstr(indexName);
ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg(
"cannot calculate the size because table %s for "
"index %s is not distributed",
escapedTableName, escapedIndexName)));
}
}
}

View File

@ -2742,6 +2742,25 @@ EnsureCoordinator(void)
}
/*
 * EnsurePropagationToCoordinator checks whether the coordinator is added to
 * the metadata when we're not running on the coordinator itself.
 *
 * Metadata syncing skips syncing metadata to the coordinator, so before
 * propagating a command from a worker we have to make sure the coordinator
 * is present in the metadata. For that reason, today this is used only for
 * the commands that we support propagating from workers.
 */
void
EnsurePropagationToCoordinator(void)
{
	if (IsCoordinator())
	{
		/* nothing to verify when we already are the coordinator */
		return;
	}

	EnsureCoordinatorIsInMetadata();
}
/*
* EnsureCoordinatorIsInMetadata checks whether the coordinator is added to the
* metadata, which is required for many operations.

View File

@ -24,7 +24,7 @@
#include "distributed/citus_safe_lib.h"
#include "distributed/metadata/dependency.h"
#include "distributed/metadata/distobject.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/version_compat.h"
#include "nodes/value.h"
#include "utils/array.h"

View File

@ -158,13 +158,6 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
"replication factor.")));
}
/* if we have enough nodes, add an extra placement attempt for backup */
uint32 placementAttemptCount = (uint32) replicationFactor;
if (workerNodeCount > replicationFactor)
{
placementAttemptCount++;
}
/* set shard storage type according to relation type */
char shardStorageType = ShardStorageType(distributedTableId);

View File

@ -15,7 +15,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "c.h"
#include "fmgr.h"

View File

@ -14,7 +14,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "funcapi.h"
#include "libpq-fe.h"

View File

@ -13,7 +13,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "c.h"
#include "fmgr.h"

View File

@ -17,7 +17,7 @@
#include <math.h>
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/htup_details.h"
#include "access/genam.h"

View File

@ -792,7 +792,12 @@ ShardListSizeInBytes(List *shardList, char *workerNodeName, uint32
/* we skip child tables of a partitioned table if this boolean variable is true */
bool optimizePartitionCalculations = true;
/* we're interested in whole table, not a particular index */
Oid indexId = InvalidOid;
StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(shardList,
indexId,
TOTAL_RELATION_SIZE,
optimizePartitionCalculations);

View File

@ -180,7 +180,7 @@ ActivePrimaryNodeList(LOCKMODE lockMode)
/*
* ActivePrimaryRemoteNodeList returns a list of all active primary nodes in
* workerNodeHash.
* workerNodeHash except the local one.
*/
List *
ActivePrimaryRemoteNodeList(LOCKMODE lockMode)

View File

@ -11,7 +11,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "catalog/pg_type.h"
#include "distributed/citus_ruleutils.h"

View File

@ -13,7 +13,7 @@
*/
#include "postgres.h"
#include "pg_version_compat.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/cte_inline.h"
#include "nodes/nodeFuncs.h"

View File

@ -9,7 +9,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "funcapi.h"
@ -702,6 +702,7 @@ DissuadePlannerFromUsingPlan(PlannedStmt *plan)
* Arbitrarily high cost, but low enough that it can be added up
* without overflowing by choose_custom_plan().
*/
Assert(plan != NULL);
plan->planTree->total_cost = FLT_MAX / 100000000;
}

View File

@ -9,7 +9,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/extended_op_node_utils.h"
#include "distributed/listutils.h"

View File

@ -34,7 +34,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/distributed_planner.h"
#include "distributed/insert_select_planner.h"

View File

@ -12,7 +12,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
@ -525,8 +525,16 @@ ShardPlacementForFunctionColocatedWithDistTable(DistObjectCacheEntry *procedure,
if (partitionParam->paramkind == PARAM_EXTERN)
{
/* Don't log a message, we should end up here again without a parameter */
DissuadePlannerFromUsingPlan(plan);
/*
* Don't log a message, we should end up here again without a
* parameter.
* Note that "plan" can be null, for example when a CALL statement
* is prepared.
*/
if (plan)
{
DissuadePlannerFromUsingPlan(plan);
}
return NULL;
}
}

View File

@ -10,7 +10,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "catalog/pg_class.h"
#include "catalog/pg_type.h"

View File

@ -71,7 +71,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "funcapi.h"

View File

@ -9,7 +9,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/listutils.h"
#include "distributed/local_executor.h"

View File

@ -29,7 +29,7 @@
#include "distributed/multi_logical_optimizer.h"
#include "distributed/multi_router_planner.h"
#include "distributed/pg_dist_node_metadata.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/query_pushdown_planning.h"
#include "distributed/query_colocation_checker.h"
#include "distributed/repartition_executor.h"

View File

@ -11,7 +11,7 @@
#include "libpq-fe.h"
#include "miscadmin.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/htup_details.h"
#include "access/xact.h"

View File

@ -13,7 +13,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include <limits.h>

View File

@ -13,7 +13,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include <math.h>

View File

@ -14,7 +14,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/heapam.h"
#include "access/nbtree.h"

View File

@ -13,7 +13,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include <math.h>
#include <stdint.h>

View File

@ -13,7 +13,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include <stddef.h>

View File

@ -21,7 +21,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/relation.h"
#include "distributed/multi_logical_planner.h"

View File

@ -21,7 +21,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/citus_clauses.h"
#include "distributed/citus_ruleutils.h"

View File

@ -48,7 +48,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "funcapi.h"

View File

@ -10,7 +10,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/colocation_utils.h"
#include "distributed/distributed_planner.h"

View File

@ -66,7 +66,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "fmgr.h"

View File

@ -15,7 +15,7 @@
#include "pgstat.h"
#include "libpq-fe.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/genam.h"

View File

@ -1266,7 +1266,7 @@ RegisterCitusConfigVariables(void)
DefineCustomBoolVariable(
"citus.enable_create_database_propagation",
gettext_noop("Enables propagating CREATE DATABASE "
"and DROP DATABASE statements to workers"),
"and DROP DATABASE statements to workers."),
NULL,
&EnableCreateDatabasePropagation,
false,

View File

@ -4,7 +4,7 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text)
RETURNS void
LANGUAGE C
STRICT
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_database_command$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS
'run a database command without transaction block restrictions';

View File

@ -4,7 +4,7 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text)
RETURNS void
LANGUAGE C
STRICT
VOLATILE
AS 'MODULE_PATHNAME', $$citus_internal_database_command$$;
COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS
'run a database command without transaction block restrictions';

View File

@ -19,7 +19,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "pg_version_compat.h"

View File

@ -12,7 +12,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "c.h"
#include "fmgr.h"

View File

@ -90,6 +90,28 @@ activate_node_snapshot(PG_FUNCTION_ARGS)
}
/*
* IsMetadataSynced checks the workers to see if all workers with metadata are
* synced.
*/
static bool
IsMetadataSynced(void)
{
List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerList)
{
if (workerNode->hasMetadata && !workerNode->metadataSynced)
{
return false;
}
}
return true;
}
/*
* wait_until_metadata_sync waits until the maintenance daemon does a metadata
* sync, or times out.
@ -99,19 +121,10 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
{
uint32 timeout = PG_GETARG_UINT32(0);
List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock);
bool waitNotifications = false;
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerList)
{
/* if already has metadata, no need to do it again */
if (workerNode->hasMetadata && !workerNode->metadataSynced)
{
waitNotifications = true;
break;
}
}
/* First we start listening. */
MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION,
LOCAL_HOST_NAME, PostPortNumber);
ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL);
/*
* If all the metadata nodes have already been synced, we should not wait.
@ -119,15 +132,12 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
* the notification and we'd wait unnecessarily here. Worse, the test outputs
* might be inconsistent across executions due to the warning.
*/
if (!waitNotifications)
if (IsMetadataSynced())
{
CloseConnection(connection);
PG_RETURN_VOID();
}
MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION,
LOCAL_HOST_NAME, PostPortNumber);
ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL);
int waitFlags = WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH;
int waitResult = WaitLatchOrSocket(NULL, waitFlags, PQsocket(connection->pgConn),
timeout, 0);
@ -139,7 +149,7 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS)
{
ClearResults(connection, true);
}
else if (waitResult & WL_TIMEOUT)
else if (waitResult & WL_TIMEOUT && !IsMetadataSynced())
{
elog(WARNING, "waiting for metadata sync timed out");
}

View File

@ -12,7 +12,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "c.h"
#include "fmgr.h"

View File

@ -12,7 +12,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "miscadmin.h"
#include "unistd.h"

View File

@ -15,7 +15,7 @@
*/
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "miscadmin.h"

View File

@ -14,7 +14,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "miscadmin.h"
#include "libpq-fe.h"

View File

@ -34,6 +34,12 @@
#include "utils/memutils.h"
#include "utils/builtins.h"
static void SendCommandToRemoteMetadataNodesParams(const char *command,
const char *user, int parameterCount,
const Oid *parameterTypes,
const char *const *parameterValues);
static void SendBareCommandListToMetadataNodesInternal(List *commandList,
TargetWorkerSet targetWorkerSet);
static void SendCommandToMetadataWorkersParams(const char *command,
const char *user, int parameterCount,
const Oid *parameterTypes,
@ -150,6 +156,74 @@ SendCommandListToWorkersWithMetadata(List *commands)
}
/*
 * SendCommandToRemoteNodesWithMetadata sends a command to remote nodes in
 * parallel. Commands are committed on the nodes when the local transaction
 * commits.
 *
 * The connections are opened as the current user; see the ViaSuperUser
 * variant for commands that need extension-owner privileges.
 */
void
SendCommandToRemoteNodesWithMetadata(const char *command)
{
SendCommandToRemoteMetadataNodesParams(command, CurrentUserName(),
0, NULL, NULL);
}
/*
 * SendCommandToRemoteNodesWithMetadataViaSuperUser sends a command to remote
 * nodes in parallel by opening a super user connection. Commands are committed
 * on the nodes when the local transaction commits. The connections are made as
 * the extension owner to ensure write access to the Citus metadata tables.
 *
 * Since opening superuser connections for metadata tables is prevented, using
 * this function is discouraged. Consider using it only for propagating
 * pg_dist_object tuples for dependent objects.
 */
void
SendCommandToRemoteNodesWithMetadataViaSuperUser(const char *command)
{
SendCommandToRemoteMetadataNodesParams(command, CitusExtensionOwnerName(),
0, NULL, NULL);
}
/*
 * SendCommandListToRemoteNodesWithMetadata sends all commands to remote nodes
 * with the current user. See SendCommandToRemoteNodesWithMetadata() for
 * details.
 */
void
SendCommandListToRemoteNodesWithMetadata(List *commands)
{
char *command = NULL;
foreach_ptr(command, commands)
{
SendCommandToRemoteNodesWithMetadata(command);
}
}
/*
 * SendCommandToRemoteMetadataNodesParams is a wrapper around
 * SendCommandToWorkersParamsInternal() that can be used to send commands
 * to remote metadata nodes.
 *
 * The out-of-sync check runs against METADATA_NODES (which includes the local
 * node), while the command itself is sent to REMOTE_METADATA_NODES only.
 */
static void
SendCommandToRemoteMetadataNodesParams(const char *command,
const char *user, int parameterCount,
const Oid *parameterTypes,
const char *const *parameterValues)
{
/* use METADATA_NODES so that ErrorIfAnyMetadataNodeOutOfSync checks local node as well */
List *workerNodeList = TargetWorkerSetNodeList(METADATA_NODES,
RowShareLock);
ErrorIfAnyMetadataNodeOutOfSync(workerNodeList);
SendCommandToWorkersParamsInternal(REMOTE_METADATA_NODES, command, user,
parameterCount, parameterTypes, parameterValues);
}
/*
* TargetWorkerSetNodeList returns a list of WorkerNode's that satisfies the
* TargetWorkerSet.
@ -162,17 +236,29 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode)
{
workerNodeList = ActivePrimaryNodeList(lockMode);
}
else
else if (targetWorkerSet == REMOTE_NODES || targetWorkerSet == REMOTE_METADATA_NODES)
{
workerNodeList = ActivePrimaryRemoteNodeList(lockMode);
}
else if (targetWorkerSet == NON_COORDINATOR_METADATA_NODES ||
targetWorkerSet == NON_COORDINATOR_NODES)
{
workerNodeList = ActivePrimaryNonCoordinatorNodeList(lockMode);
}
else
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid target worker set: %d", targetWorkerSet)));
}
List *result = NIL;
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodeList)
{
if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || targetWorkerSet ==
METADATA_NODES) &&
if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES ||
targetWorkerSet == REMOTE_METADATA_NODES ||
targetWorkerSet == METADATA_NODES) &&
!workerNode->hasMetadata)
{
continue;
@ -186,16 +272,42 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode)
/*
* SendBareCommandListToMetadataWorkers sends a list of commands to metadata
* workers in serial. Commands are committed immediately: new connections are
* always used and no transaction block is used (hence "bare"). The connections
* are made as the extension owner to ensure write access to the Citus metadata
* tables. Primarily useful for INDEX commands using CONCURRENTLY.
* SendBareCommandListToRemoteMetadataNodes is a wrapper around
* SendBareCommandListToMetadataNodesInternal() that can be used to send
* bare commands to remote metadata nodes.
*/
void
SendBareCommandListToRemoteMetadataNodes(List *commandList)
{
SendBareCommandListToMetadataNodesInternal(commandList,
REMOTE_METADATA_NODES);
}
/*
* SendBareCommandListToMetadataWorkers is a wrapper around
* SendBareCommandListToMetadataNodesInternal() that can be used to send
* bare commands to metadata workers.
*/
void
SendBareCommandListToMetadataWorkers(List *commandList)
{
TargetWorkerSet targetWorkerSet = NON_COORDINATOR_METADATA_NODES;
SendBareCommandListToMetadataNodesInternal(commandList,
NON_COORDINATOR_METADATA_NODES);
}
/*
* SendBareCommandListToMetadataNodesInternal sends a list of commands to given
* target worker set in serial. Commands are committed immediately: new connections
* are always used and no transaction block is used (hence "bare"). The connections
* are made as the extension owner to ensure write access to the Citus metadata
* tables. Primarily useful for INDEX commands using CONCURRENTLY.
*/
static void
SendBareCommandListToMetadataNodesInternal(List *commandList,
TargetWorkerSet targetWorkerSet)
{
List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock);
char *nodeUser = CurrentUserName();

View File

@ -10,7 +10,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "catalog/pg_type.h"
#include "distributed/citus_nodes.h"

View File

@ -18,7 +18,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include <ctype.h>

View File

@ -14,7 +14,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "safe_lib.h"

View File

@ -18,7 +18,7 @@
* it otherwise we get warnings about redefining this value. This needs to be
* done before including libpq.h.
*/
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "distributed/connection_management.h"
#include "distributed/memutils.h"

View File

@ -12,7 +12,7 @@
#include "postgres.h"
#include "distributed/pg_version_constants.h"
#include "pg_version_constants.h"
#include "access/genam.h"
#include "access/htup_details.h"

Some files were not shown because too many files have changed in this diff Show More