diff --git a/.devcontainer/src/test/regress/Pipfile.lock b/.devcontainer/src/test/regress/Pipfile.lock index 15cb7ecda..bdb42a1c3 100644 --- a/.devcontainer/src/test/regress/Pipfile.lock +++ b/.devcontainer/src/test/regress/Pipfile.lock @@ -127,72 +127,61 @@ }, "cffi": { "hashes": [ - "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5", - "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef", - "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104", - "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426", - "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405", - "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375", - "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a", - "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e", - "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc", - "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf", - "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185", - "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497", - "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3", - "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35", - "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c", - "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83", - "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21", - "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca", - "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984", - "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac", - "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd", - "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee", - "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a", - "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2", - "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192", - "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7", - "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585", - "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f", - "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e", - "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27", - "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b", - "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e", - "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e", - "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d", - "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c", - "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415", - "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82", - "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02", - "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314", - "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325", - "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c", - "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3", - 
"sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914", - "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045", - "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d", - "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9", - "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5", - "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2", - "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c", - "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3", - "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2", - "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8", - "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d", - "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d", - "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9", - "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162", - "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76", - "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4", - "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e", - "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9", - "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6", - "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b", - "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01", - "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0" + "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", + "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", + "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", + "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", + "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", + "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", + "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", + "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", + "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", + "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", + "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", + "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", + "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", + "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", + "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", + "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", + "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", + "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", + "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", + "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", + "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", + "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", + "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", + "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", + 
"sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", + "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", + "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", + "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", + "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", + "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", + "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", + "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", + "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", + "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", + "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", + "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", + "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", + "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", + "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", + "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", + "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", + "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", + "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", + "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", + "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", + "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", + "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", + "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", + "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", + "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", + "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", + "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" ], - "version": "==1.15.1" + "markers": "python_version >= '3.8'", + "version": "==1.16.0" }, "click": { "hashes": [ @@ -420,78 +409,78 @@ "mitmproxy": { "editable": true, "git": "https://github.com/citusdata/mitmproxy.git", - "markers": "python_version >= '3.10'", + "markers": "python_version >= '3.9'", "ref": "2fd18ef051b987925a36337ab1d61aa674353b44" }, "msgpack": { "hashes": [ - "sha256:00ce5f827d4f26fc094043e6f08b6069c1b148efa2631c47615ae14fb6cafc89", - "sha256:04450e4b5e1e662e7c86b6aafb7c230af9334fd0becf5e6b80459a507884241c", - "sha256:099c3d8a027367e1a6fc55d15336f04ff65c60c4f737b5739f7db4525c65fe9e", - "sha256:102cfb54eaefa73e8ca1e784b9352c623524185c98e057e519545131a56fb0af", - "sha256:14db7e1b7a7ed362b2f94897bf2486c899c8bb50f6e34b2db92fe534cdab306f", - "sha256:159cfec18a6e125dd4723e2b1de6f202b34b87c850fb9d509acfd054c01135e9", - "sha256:1dc67b40fe81217b308ab12651adba05e7300b3a2ccf84d6b35a878e308dd8d4", - "sha256:1f0e36a5fa7a182cde391a128a64f437657d2b9371dfa42eda3436245adccbf5", - "sha256:229ccb6713c8b941eaa5cf13dc7478eba117f21513b5893c35e44483e2f0c9c8", - "sha256:25d3746da40f3c8c59c3b1d001e49fd2aa17904438f980d9a391370366df001e", - "sha256:32c0aff31f33033f4961abc01f78497e5e07bac02a508632aef394b384d27428", - "sha256:33bbf47ea5a6ff20c23426106e81863cdbb5402de1825493026ce615039cc99d", - "sha256:35ad5aed9b52217d4cea739d0ea3a492a18dd86fecb4b132668a69f27fb0363b", - 
"sha256:3910211b0ab20be3a38e0bb944ed45bd4265d8d9f11a3d1674b95b298e08dd5c", - "sha256:3b5658b1f9e486a2eec4c0c688f213a90085b9cf2fec76ef08f98fdf6c62f4b9", - "sha256:40b801b768f5a765e33c68f30665d3c6ee1c8623a2d2bb78e6e59f2db4e4ceb7", - "sha256:47275ff73005a3e5e146e50baa2378e1730cba6e292f0222bc496a8e4c4adfc8", - "sha256:55bb4a1bf94e39447bc08238a2fb8a767460388a8192f67c103442eb36920887", - "sha256:5b08676a17e3f791daad34d5fcb18479e9c85e7200d5a17cbe8de798643a7e37", - "sha256:5b16344032a27b2ccfd341f89dadf3e4ef6407d91e4b93563c14644a8abb3ad7", - "sha256:5c5e05e4f5756758c58a8088aa10dc70d851c89f842b611fdccfc0581c1846bc", - "sha256:5cd67674db3c73026e0a2c729b909780e88bd9cbc8184256f9567640a5d299a8", - "sha256:5e7fae9ca93258a956551708cf60dc6c8145574e32ce8c8c4d894e63bcb04341", - "sha256:61213482b5a387ead9e250e9e3cb290292feca39dc83b41c3b1b7b8ffc8d8ecb", - "sha256:619a63753ba9e792fe3c6c0fc2b9ee2cfbd92153dd91bee029a89a71eb2942cd", - "sha256:652e4b7497825b0af6259e2c54700e6dc33d2fc4ed92b8839435090d4c9cc911", - "sha256:68569509dd015fcdd1e6b2b3ccc8c51fd27d9a97f461ccc909270e220ee09685", - "sha256:6a01a072b2219b65a6ff74df208f20b2cac9401c60adb676ee34e53b4c651077", - "sha256:70843788c85ca385846a2d2f836efebe7bb2687ca0734648bf5c9dc6c55602d2", - "sha256:76820f2ece3b0a7c948bbb6a599020e29574626d23a649476def023cbb026787", - "sha256:7a006c300e82402c0c8f1ded11352a3ba2a61b87e7abb3054c845af2ca8d553c", - "sha256:7baf16fd8908a025c4a8d7b699103e72d41f967e2aee5a2065432bcdbd9fd06e", - "sha256:7ecf431786019a7bfedc28281531d706627f603e3691d64eccdbce3ecd353823", - "sha256:885de1ed5ea01c1bfe0a34c901152a264c3c1f8f1d382042b92ea354bd14bb0e", - "sha256:88cdb1da7fdb121dbb3116910722f5acab4d6e8bfcacab8fafe27e2e7744dc6a", - "sha256:95ade0bd4cf69e04e8b8f8ec2d197d9c9c4a9b6902e048dc7456bf6d82e12a80", - "sha256:9b88dc97ba86c96b964c3745a445d9a65f76fe21955a953064fe04adb63e9367", - "sha256:9c780d992f5d734432726b92a0c87bf1857c3d85082a8dea29cbf56e44a132b3", - "sha256:9f85200ea102276afdd3749ca94747f057bbb868d1c52921ee2446730b508d0f", - "sha256:a1cf98afa7ad5e7012454ca3fde254499a13f9d92fd50cb46118118a249a1355", - "sha256:a635aecf1047255576dbb0927cbf9a7aa4a68e9d54110cc3c926652d18f144e0", - "sha256:ae97504958d0bc58c1152045c170815d5c4f8af906561ce044b6358b43d0c97e", - "sha256:b06a5095a79384760625b5de3f83f40b3053a385fb893be8a106fbbd84c14980", - "sha256:b5c8dd9a386a66e50bd7fa22b7a49fb8ead2b3574d6bd69eb1caced6caea0803", - "sha256:bae6c561f11b444b258b1b4be2bdd1e1cf93cd1d80766b7e869a79db4543a8a8", - "sha256:bbb4448a05d261fae423d5c0b0974ad899f60825bc77eabad5a0c518e78448c2", - "sha256:bd6af61388be65a8701f5787362cb54adae20007e0cc67ca9221a4b95115583b", - "sha256:bf652839d16de91fe1cfb253e0a88db9a548796939533894e07f45d4bdf90a5f", - "sha256:d6d25b8a5c70e2334ed61a8da4c11cd9b97c6fbd980c406033f06e4463fda006", - "sha256:da057d3652e698b00746e47f06dbb513314f847421e857e32e1dc61c46f6c052", - "sha256:e0ed35d6d6122d0baa9a1b59ebca4ee302139f4cfb57dab85e4c73ab793ae7ed", - "sha256:e36560d001d4ba469d469b02037f2dd404421fd72277d9474efe9f03f83fced5", - "sha256:f4321692e7f299277e55f322329b2c972d93bb612d85f3fda8741bec5c6285ce", - "sha256:f75114c05ec56566da6b55122791cf5bb53d5aada96a98c016d6231e03132f76", - "sha256:fb4571efe86545b772a4630fee578c213c91cbcfd20347806e47fd4e782a18fe", - "sha256:fc97aa4b4fb928ff4d3b74da7c30b360d0cb3ede49a5a6e1fd9705f49aea1deb" + "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862", + "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d", + "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3", + 
"sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672", + "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0", + "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9", + "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee", + "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46", + "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524", + "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819", + "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc", + "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc", + "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1", + "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82", + "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81", + "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6", + "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d", + "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2", + "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c", + "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87", + "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84", + "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e", + "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95", + "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f", + "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b", + "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93", + "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf", + "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61", + "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c", + "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8", + "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d", + "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c", + "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4", + "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba", + "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415", + "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee", + "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d", + "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9", + "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075", + "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f", + "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7", + "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681", + "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329", + "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1", + "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf", + "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c", + "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5", + "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b", + "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5", + 
"sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e", + "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b", + "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad", + "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd", + "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7", + "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002", + "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc" ], "markers": "python_version >= '3.8'", - "version": "==1.0.6" + "version": "==1.0.7" }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], "markers": "python_version >= '3.7'", - "version": "==23.1" + "version": "==23.2" }, "passlib": { "hashes": [ @@ -698,6 +687,62 @@ "markers": "python_version >= '3'", "version": "==0.17.16" }, + "ruamel.yaml.clib": { + "hashes": [ + "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d", + "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", + "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", + "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", + "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", + "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", + "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", + "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", + "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", + "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", + "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", + "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", + "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", + "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", + "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", + "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", + "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", + "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", + "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", + "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", + "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", + "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", + "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", + "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", + "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + 
"sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", + "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", + "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", + "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", + "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", + "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", + "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", + "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb", + "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942", + "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", + "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", + "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", + "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", + "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", + "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", + "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", + "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1", + "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2", + "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875", + "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412" + ], + "markers": "python_version < '3.10' and platform_python_implementation == 'CPython'", + "version": "==0.2.8" + }, "sortedcontainers": { "hashes": [ "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", @@ -746,11 +791,12 @@ }, "werkzeug": { "hashes": [ - "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", - "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" + "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", + "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" ], + "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==2.3.7" + "version": "==3.0.1" }, "wsproto": { "hashes": [ @@ -906,11 +952,11 @@ }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], "markers": "python_version >= '3.7'", - "version": "==23.1" + "version": "==23.2" }, "pathspec": { "hashes": [ @@ -922,19 +968,19 @@ }, "platformdirs": { "hashes": [ - "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d", - "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d" + "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", + "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" ], "markers": "python_version >= '3.7'", - "version": "==3.10.0" + "version": "==3.11.0" }, "pycodestyle": { "hashes": [ - "sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0", - "sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8" + "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f", + "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67" ], "markers": "python_version >= 
'3.8'", - "version": "==2.11.0" + "version": "==2.11.1" }, "pyflakes": { "hashes": [ diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 804cd0bb9..e938e3904 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -16,6 +16,7 @@ on: - "release-*" pull_request: types: [opened, reopened,synchronize] + merge_group: jobs: # Since GHA does not interpolate env varibles in matrix context, we need to # define them in a separate job and use them in other jobs. @@ -496,7 +497,6 @@ jobs: matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }} steps: - uses: actions/checkout@v3.5.0 - - uses: actions/download-artifact@v3.0.1 - uses: "./.github/actions/setup_extension" - name: Run minimal tests run: |- diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml index 8690fce1f..51bd82503 100644 --- a/.github/workflows/packaging-test-pipelines.yml +++ b/.github/workflows/packaging-test-pipelines.yml @@ -3,6 +3,7 @@ name: Build tests in packaging images on: pull_request: types: [opened, reopened,synchronize] + merge_group: workflow_dispatch: diff --git a/CHANGELOG.md b/CHANGELOG.md index 02fc91d04..686e78dd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +### citus v12.1.1 (November 9, 2023) ### + +* Fixes leaking of memory and memory contexts in Citus foreign key cache + (#7219) + +* Makes sure to disallow creating a replicated distributed table concurrently + (#7236) + ### citus v12.1.0 (September 12, 2023) ### * Adds support for PostgreSQL 16.0 (#7173) diff --git a/src/backend/columnar/columnar_compression.c b/src/backend/columnar/columnar_compression.c index 98a175b06..50cdfb01b 100644 --- a/src/backend/columnar/columnar_compression.c +++ b/src/backend/columnar/columnar_compression.c @@ -18,7 +18,7 @@ #include "lib/stringinfo.h" #include "columnar/columnar_compression.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if HAVE_CITUS_LIBLZ4 #include diff --git a/src/backend/columnar/columnar_debug.c b/src/backend/columnar/columnar_debug.c index cbb0d554f..c60919513 100644 --- a/src/backend/columnar/columnar_debug.c +++ b/src/backend/columnar/columnar_debug.c @@ -15,7 +15,7 @@ #include "access/table.h" #include "catalog/pg_am.h" #include "catalog/pg_type.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" #include "storage/fd.h" #include "storage/smgr.h" diff --git a/src/backend/distributed/README.md b/src/backend/distributed/README.md index 7c4f43add..225b1f962 100644 --- a/src/backend/distributed/README.md +++ b/src/backend/distributed/README.md @@ -1723,11 +1723,11 @@ Merge command the same principles as INSERT .. SELECT processing. However, due t # DDL -DDL commands are primarily handled via the ProcessUtility hook, which gets the parse tree of the DDL command. For supported DDL commands, we always follow the same sequence of steps: +DDL commands are primarily handled via the citus_ProcessUtility hook, which gets the parse tree of the DDL command. For supported DDL commands, we always follow the same sequence of steps: 1. Qualify the table names in the parse tree (simplifies deparsing, avoids sensitivity to search_path changes) 2. Pre-process logic -3. Call original ProcessUtility to execute the command on the local shell table +3. Call original previous ProcessUtility to execute the command on the local shell table 4. Post-process logic 5. 
@@ -1749,6 +1749,66 @@ The reason for handling dependencies and deparsing in post-process step is that Not all table DDL is currently deparsed. In that case, the original command sent by the client is used. That is a shortcoming in our DDL logic that causes user-facing issues and should be addressed. We do not directly construct a separate DDL command for each shard. Instead, we call the `worker_apply_shard_ddl_command(shardid bigint, ddl_command text)` function which parses the DDL command, replaces the table names with shard names in the parse tree according to the shard ID, and then executes the command. That also has some shortcomings, because we cannot support more complex DDL commands in this manner (e.g. adding multiple foreign keys). Ideally, all DDL would be deparsed, and for table DDL the deparsed query string would have shard names, similar to regular queries. +`markDistributed` is used to indicate whether we add a record to `pg_dist_object` to mark the object as "distributed". + +## Defining a new DDL command + +All commands that are propagated by Citus should be defined in a DistributeObjectOps struct. Below is a sample DistributeObjectOps for the ALTER DATABASE command, defined in the [distribute_object_ops.c](commands/distribute_object_ops.c) file. + +```c +static DistributeObjectOps Database_Alter = { + .deparse = DeparseAlterDatabaseStmt, + .qualify = NULL, + .preprocess = PreprocessAlterDatabaseStmt, + .postprocess = NULL, + .objectType = OBJECT_DATABASE, + .operationType = DIST_OPS_ALTER, + .address = NULL, + .markDistributed = false, +}; +``` + +Each field in the struct is documented in the comments within the `DistributeObjectOps` definition. When defining a new DDL command, follow these guidelines: + +- **Returning tasks for `preprocess` and `postprocess`**: Ensure that either `preprocess` or `postprocess` returns a list of "DDLJob"s. If both functions return non-empty lists, an assertion failure is triggered. A typical non-generic `preprocess` is sketched at the end of this section. + +- **Generic `preprocess` and `postprocess` methods**: The generic methods, `PreprocessAlterDistributedObjectStmt` and `PostprocessAlterDistributedObjectStmt`, serve as generic pre- and post-methods used for various statements on distributed objects. + + - The `PreprocessAlterDistributedObjectStmt` method carries out the following operations: + - Performs a qualification operation. + - Deparses the statement and generates a task list. + + - The `PostprocessAlterDistributedObjectStmt` method: + - Invokes the `EnsureAllObjectDependenciesExistOnAllNodes` function to propagate missing dependencies, on both the coordinator and the workers. + + - Before defining new `preprocess` or `postprocess` methods, check whether these generic methods can be used in your specific case. + + +- **`deparse`**: When propagating the command to worker nodes, make sure to define `deparse`, which generates the query string that is sent to each worker node. + +- **`markDistributed`**: Set this flag to true if you want to add a record to the `pg_dist_object` table. This is particularly important for `CREATE` statements when introducing a new object to the system. + +- **`address`**: If `markDistributed` is set to true, you must define the `address`. Failure to do so will result in a runtime error. The `address` is required to identify the fields that will be stored in the `pg_dist_object` table. + +- **`markDistributed` usage in `DROP` statements**: Note that `markDistributed` does not apply to `DROP` statements. For `DROP` statements, you instead need to call `UnmarkObjectDistributed()` for the object, either in `preprocess` or in `postprocess`. Otherwise, stale records in the `pg_dist_object` table will cause errors in UDF calls such as `citus_add_node()`, which would try to copy the non-existent database object. + +- **`qualify`**: The `qualify` function is used to qualify the objects based on their schemas in the parse tree. It is employed to prevent sensitivity to changes in the `search_path` on worker nodes. It is not mandatory to define this function for all DDL commands; it is only required for commands that involve schema-bound objects, such as tables, types, and functions. + +After defining the `DistributeObjectOps` structure, register it in the `GetDistributeObjectOps()` function, as shown below: + +```c +// Example implementation in C code +const DistributeObjectOps * +GetDistributeObjectOps(Node *node) +{ + switch (nodeTag(node)) + { + case T_AlterDatabaseStmt: + { + return &Database_Alter; + } +... +```
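For commands that do not fit the generic methods, a `preprocess` callback typically follows the pattern below. This is a minimal sketch assembled from the role-command preprocess functions touched elsewhere in this diff (for example `PreprocessAlterRoleSetStmt` in role.c); the function name is only a placeholder, and the exact guard checks vary per command:

```c
/* illustrative sketch; the statement type and guards vary per command */
static List *
PreprocessMyAlterStmt(Node *node, const char *queryString,
					  ProcessUtilityContext processUtilityContext)
{
	if (!ShouldPropagate())
	{
		return NIL;                   /* propagation disabled or not applicable */
	}

	EnsurePropagationToCoordinator(); /* distributed DDL must reach all nodes */

	QualifyTreeNode(node);            /* the qualify step, for schema-bound objects */
	const char *sql = DeparseTreeNode(node);  /* the deparse step */

	/* wrap the command so that remote nodes do not re-propagate it */
	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) sql,
								ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(REMOTE_NODES, commands);
}
```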
## Object & dependency propagation
diff --git a/src/backend/distributed/commands/call.c b/src/backend/distributed/commands/call.c index b2f0bfca1..12a1d93b8 100644 --- a/src/backend/distributed/commands/call.c +++ b/src/backend/distributed/commands/call.c @@ -13,7 +13,7 @@ #include "postgres.h" #include "funcapi.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_proc.h" #include "commands/defrem.h"
diff --git a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c index 1102a3a51..9b22fb161 100644 --- a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c +++ b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/xact.h" #include "catalog/pg_constraint.h"
diff --git a/src/backend/distributed/commands/citus_global_signal.c b/src/backend/distributed/commands/citus_global_signal.c index 8183d6673..b1f4cf187 100644 --- a/src/backend/distributed/commands/citus_global_signal.c +++ b/src/backend/distributed/commands/citus_global_signal.c @@ -11,7 +11,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/backend_data.h" #include "distributed/metadata_cache.h"
diff --git a/src/backend/distributed/commands/cluster.c b/src/backend/distributed/commands/cluster.c index 92fcb3ec6..cdae6fc08 100644 --- a/src/backend/distributed/commands/cluster.c +++ b/src/backend/distributed/commands/cluster.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "commands/defrem.h"
diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index 023197e15..521ce4b3d 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -27,7 +27,7 @@ #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" #include "distributed/worker_create_or_replace.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h"
"pg_version_constants.h" #include "distributed/worker_manager.h" #include "parser/parse_type.h" #include "utils/builtins.h" diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 1e89c6b93..768e20b73 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -11,7 +11,7 @@ #include "postgres.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/commands/utility_hook.h" #include "access/genam.h" diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index 977efb145..e309ee86c 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -40,14 +40,14 @@ static char * DropTableIfExistsCommand(Oid relationId); /* * EnsureDependenciesExistOnAllNodes finds all the dependencies that we support and makes - * sure these are available on all workers. If not available they will be created on the - * workers via a separate session that will be committed directly so that the objects are + * sure these are available on all nodes. If not available they will be created on the + * nodes via a separate session that will be committed directly so that the objects are * visible to potentially multiple sessions creating the shards. * * Note; only the actual objects are created via a separate session, the records to * pg_dist_object are created in this session. As a side effect the objects could be - * created on the workers without a catalog entry. Updates to the objects on the coordinator - * are not propagated to the workers until the record is visible on the coordinator. + * created on the nodes without a catalog entry. Updates to the objects on local node + * are not propagated to the remote nodes until the record is visible on local node. * * This is solved by creating the dependencies in an idempotent manner, either via * postgres native CREATE IF NOT EXISTS, or citus helper functions. @@ -95,7 +95,7 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) * either get it now, or get it in citus_add_node after this transaction finishes and * the pg_dist_object record becomes visible. */ - List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(RowShareLock); + List *remoteNodeList = ActivePrimaryRemoteNodeList(RowShareLock); /* * Lock dependent objects explicitly to make sure same DDL command won't be sent @@ -127,12 +127,12 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) */ if (HasAnyDependencyInPropagatedObjects(target)) { - SendCommandListToWorkersWithMetadata(ddlCommands); + SendCommandListToRemoteNodesWithMetadata(ddlCommands); } else { WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_ptr(workerNode, remoteNodeList) { const char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; @@ -144,8 +144,8 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) } /* - * We do this after creating the objects on the workers, we make sure - * that objects have been created on worker nodes before marking them + * We do this after creating the objects on remote nodes, we make sure + * that objects have been created on remote nodes before marking them * distributed, so MarkObjectDistributed wouldn't fail. 
*/ foreach_ptr(dependency, dependenciesWithCommands) diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index f3ef31667..c617edfa4 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -14,7 +14,7 @@ #include "distributed/commands.h" #include "distributed/deparser.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/version_compat.h" #include "distributed/commands/utility_hook.h" diff --git a/src/backend/distributed/commands/foreign_constraint.c b/src/backend/distributed/commands/foreign_constraint.c index 7c2d50f44..709287c56 100644 --- a/src/backend/distributed/commands/foreign_constraint.c +++ b/src/backend/distributed/commands/foreign_constraint.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/htup_details.h" #include "access/sysattr.h" diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 01911677d..701041673 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -21,7 +21,7 @@ #include "miscadmin.h" #include "funcapi.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" @@ -978,7 +978,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) char *argmodes = NULL; int insertorderbyat = -1; int argsprinted = 0; - int inputargno = 0; HeapTuple proctup = SearchSysCache1(PROCOID, funcOid); if (!HeapTupleIsValid(proctup)) @@ -1058,7 +1057,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) } } - inputargno++; /* this is a 1-based counter */ if (argsprinted == insertorderbyat) { appendStringInfoString(&buf, " ORDER BY "); diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 275f253b3..0b5cfb812 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" #include "access/xact.h" @@ -180,6 +180,8 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand, return NIL; } + EnsureCoordinator(); + if (createIndexStatement->idxname == NULL) { /* diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index a684d06cc..a5c7a47f4 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -52,7 +52,7 @@ #include /* for htons */ #include -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/htup_details.h" #include "access/htup.h" diff --git a/src/backend/distributed/commands/publication.c b/src/backend/distributed/commands/publication.c index 581f7f874..f225b0fca 100644 --- a/src/backend/distributed/commands/publication.c +++ b/src/backend/distributed/commands/publication.c @@ -175,7 +175,6 @@ BuildCreatePublicationStmt(Oid publicationId) PUBLICATION_PART_ROOT : PUBLICATION_PART_LEAF); Oid relationId = InvalidOid; - int citusTableCount PG_USED_FOR_ASSERTS_ONLY = 0; /* mainly for consistent ordering in test output */ relationIds = 
SortList(relationIds, CompareOids); @@ -199,11 +198,6 @@ BuildCreatePublicationStmt(Oid publicationId) createPubStmt->tables = lappend(createPubStmt->tables, rangeVar); #endif - - if (IsCitusTable(relationId)) - { - citusTableCount++; - } } /* WITH (publish_via_partition_root = true) option */ diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 754be1a2b..a2da3bf81 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -12,7 +12,7 @@ #include "pg_version_compat.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/heapam.h" #include "access/htup_details.h" @@ -156,7 +156,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString) return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); AlterRoleStmt *stmt = castNode(AlterRoleStmt, node); @@ -185,7 +185,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString) (void *) CreateAlterRoleIfExistsCommand(stmt), ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -231,7 +231,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); @@ -240,7 +240,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString, (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList); + return NodeDDLTaskList(REMOTE_NODES, commandList); } @@ -910,7 +910,8 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); + EnsureSequentialModeForRoleDDL(); LockRelationOid(DistNodeRelationId(), RowShareLock); @@ -945,7 +946,7 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString, commands = lappend(commands, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -1041,7 +1042,8 @@ PreprocessDropRoleStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); + EnsureSequentialModeForRoleDDL(); @@ -1053,7 +1055,7 @@ PreprocessDropRoleStmt(Node *node, const char *queryString, sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -1130,7 +1132,7 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); List *allGranteeRoles = stmt->grantee_roles; @@ -1170,7 +1172,7 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -1181,11 +1183,13 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, List * PostprocessGrantRoleStmt(Node *node, const char *queryString) { - if (!EnableCreateRolePropagation || !IsCoordinator() || !ShouldPropagate()) + if (!EnableCreateRolePropagation || !ShouldPropagate()) { return NIL; } + EnsurePropagationToCoordinator(); + GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); RoleSpec *role = NULL; @@ -1333,7 +1337,7 @@ PreprocessAlterRoleRenameStmt(Node *node, const char *queryString, 
Assert(stmt->renameType == OBJECT_ROLE); - EnsureCoordinator(); + EnsurePropagationToCoordinator(); char *sql = DeparseTreeNode((Node *) stmt); @@ -1341,7 +1345,7 @@ PreprocessAlterRoleRenameStmt(Node *node, const char *queryString, (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } diff --git a/src/backend/distributed/commands/subscription.c b/src/backend/distributed/commands/subscription.c index 59603b559..52519b680 100644 --- a/src/backend/distributed/commands/subscription.c +++ b/src/backend/distributed/commands/subscription.c @@ -17,7 +17,7 @@ #include "commands/defrem.h" #include "distributed/commands.h" #include "distributed/connection_management.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/version_compat.h" #include "libpq-fe.h" #include "nodes/parsenodes.h" diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 500c6f3f2..e8404d38c 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -9,7 +9,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" #include "access/xact.h" diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c index 7577dfd31..0ec8287f5 100644 --- a/src/backend/distributed/commands/trigger.c +++ b/src/backend/distributed/commands/trigger.c @@ -9,7 +9,7 @@ *------------------------------------------------------------------------- */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/table.h" diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index 02e5f0dee..ccb7bf528 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -43,7 +43,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 579b6979e..afc8fa9fd 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -25,7 +25,7 @@ *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "postgres.h" #include "miscadmin.h" @@ -708,9 +708,9 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt, } else if (IsA(parsetree, CreateRoleStmt) && !EnableCreateRolePropagation) { - ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to worker" + ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to other" " nodes"), - errhint("Connect to worker nodes directly to manually create all" + errhint("Connect to other nodes directly to manually create all" " necessary users and roles."))); } @@ -1106,16 +1106,17 @@ IsDropSchemaOrDB(Node *parsetree) * each shard placement and COMMIT/ROLLBACK is handled by * CoordinatedTransactionCallback function. * - * The function errors out if the node is not the coordinator or if the DDL is on - * a partitioned table which has replication factor > 1. 
- * + * The function errors out if the DDL is on a partitioned table which has replication + * factor > 1, or if the coordinator is not added into metadata and we're on a + * worker node because we want to make sure that distributed DDL jobs are executed + * on the coordinator node too. See EnsurePropagationToCoordinator() for more details. */ void ExecuteDistributedDDLJob(DDLJob *ddlJob) { bool shouldSyncMetadata = false; - EnsureCoordinator(); + EnsurePropagationToCoordinator(); ObjectAddress targetObjectAddress = ddlJob->targetObjectAddress; @@ -1139,23 +1140,24 @@ { if (shouldSyncMetadata) { - SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(DISABLE_DDL_PROPAGATION); char *currentSearchPath = CurrentSearchPath(); /* - * Given that we're relaying the query to the worker nodes directly, + * Given that we're relaying the query to the remote nodes directly, * we should set the search path exactly the same when necessary. */ if (currentSearchPath != NULL) { - SendCommandToWorkersWithMetadata( + SendCommandToRemoteNodesWithMetadata( psprintf("SET LOCAL search_path TO %s;", currentSearchPath)); } if (ddlJob->metadataSyncCommand != NULL) { - SendCommandToWorkersWithMetadata((char *) ddlJob->metadataSyncCommand); + SendCommandToRemoteNodesWithMetadata( + (char *) ddlJob->metadataSyncCommand); } } @@ -1234,7 +1236,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) char *currentSearchPath = CurrentSearchPath(); /* - * Given that we're relaying the query to the worker nodes directly, + * Given that we're relaying the query to the remote nodes directly, * we should set the search path exactly the same when necessary. */ if (currentSearchPath != NULL) @@ -1246,7 +1248,7 @@ commandList = lappend(commandList, (char *) ddlJob->metadataSyncCommand); - SendBareCommandListToMetadataWorkers(commandList); + SendBareCommandListToRemoteMetadataNodes(commandList); } } PG_CATCH();
diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index 21638ba7f..f1cf3cb31 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "commands/defrem.h" #include "commands/vacuum.h" @@ -184,7 +184,6 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList, CitusVacuumParams vacuumParams) { int relationIndex = 0; - int executedVacuumCount = 0; Oid relationId = InvalidOid; foreach_oid(relationId, relationIdList) { @@ -197,7 +196,6 @@ /* local execution is not implemented for VACUUM commands */ bool localExecutionSupported = false; ExecuteUtilityTaskList(taskList, localExecutionSupported); - executedVacuumCount++; } relationIndex++; }
diff --git a/src/backend/distributed/connection/locally_reserved_shared_connections.c b/src/backend/distributed/connection/locally_reserved_shared_connections.c index e3f7cb628..0a27ba17c 100644 --- a/src/backend/distributed/connection/locally_reserved_shared_connections.c +++ b/src/backend/distributed/connection/locally_reserved_shared_connections.c @@ -33,7 +33,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" @@ -302,8 +302,8 @@
EnsureConnectionPossibilityForRemotePrimaryNodes(void) * seem to cause any problems as none of the placements that we are * going to access would be on the new node. */ - List *primaryNodeList = ActivePrimaryRemoteNodeList(NoLock); - EnsureConnectionPossibilityForNodeList(primaryNodeList); + List *remoteNodeList = ActivePrimaryRemoteNodeList(NoLock); + EnsureConnectionPossibilityForNodeList(remoteNodeList); } diff --git a/src/backend/distributed/connection/placement_connection.c b/src/backend/distributed/connection/placement_connection.c index cc7962e37..3924e5a05 100644 --- a/src/backend/distributed/connection/placement_connection.c +++ b/src/backend/distributed/connection/placement_connection.c @@ -11,7 +11,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/hash.h" #include "distributed/colocation_utils.h" diff --git a/src/backend/distributed/connection/shared_connection_stats.c b/src/backend/distributed/connection/shared_connection_stats.c index fcd396fe4..104caed07 100644 --- a/src/backend/distributed/connection/shared_connection_stats.c +++ b/src/backend/distributed/connection/shared_connection_stats.c @@ -13,7 +13,7 @@ #include "postgres.h" #include "pgstat.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "libpq-fe.h" diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index 220ea3ec7..1456f2fb5 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -10,7 +10,7 @@ #include "postgres.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/deparser/deparse_statistics_stmts.c b/src/backend/distributed/deparser/deparse_statistics_stmts.c index 4a165ec72..599738dc5 100644 --- a/src/backend/distributed/deparser/deparse_statistics_stmts.c +++ b/src/backend/distributed/deparser/deparse_statistics_stmts.c @@ -12,7 +12,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/namespace.h" #include "distributed/citus_ruleutils.h" diff --git a/src/backend/distributed/deparser/ruleutils_14.c b/src/backend/distributed/deparser/ruleutils_14.c index 6ab124537..01b74eab1 100644 --- a/src/backend/distributed/deparser/ruleutils_14.c +++ b/src/backend/distributed/deparser/ruleutils_14.c @@ -14,7 +14,7 @@ * This needs to be closely in sync with the core code. *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_config.h" diff --git a/src/backend/distributed/deparser/ruleutils_15.c b/src/backend/distributed/deparser/ruleutils_15.c index 755e0f4cd..a84f8b113 100644 --- a/src/backend/distributed/deparser/ruleutils_15.c +++ b/src/backend/distributed/deparser/ruleutils_15.c @@ -14,7 +14,7 @@ * This needs to be closely in sync with the core code. 
*------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_config.h" diff --git a/src/backend/distributed/deparser/ruleutils_16.c b/src/backend/distributed/deparser/ruleutils_16.c index 31e8823b1..10373e487 100644 --- a/src/backend/distributed/deparser/ruleutils_16.c +++ b/src/backend/distributed/deparser/ruleutils_16.c @@ -14,7 +14,7 @@ * This needs to be closely in sync with the core code. *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_config.h" diff --git a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c index a2a2ff6cb..3403e27ca 100644 --- a/src/backend/distributed/executor/citus_custom_scan.c +++ b/src/backend/distributed/executor/citus_custom_scan.c @@ -9,7 +9,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" diff --git a/src/backend/distributed/executor/distributed_intermediate_results.c b/src/backend/distributed/executor/distributed_intermediate_results.c index c10303e18..cc351a1fc 100644 --- a/src/backend/distributed/executor/distributed_intermediate_results.c +++ b/src/backend/distributed/executor/distributed_intermediate_results.c @@ -8,7 +8,7 @@ *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include #include diff --git a/src/backend/distributed/executor/local_executor.c b/src/backend/distributed/executor/local_executor.c index 5661403b9..7168fd314 100644 --- a/src/backend/distributed/executor/local_executor.c +++ b/src/backend/distributed/executor/local_executor.c @@ -78,7 +78,7 @@ #include "postgres.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/adaptive_executor.h" #include "distributed/commands/utility_hook.h" @@ -567,7 +567,7 @@ LogLocalCommand(Task *task) * * One slightly different case is modifications to replicated tables * (e.g., reference tables) where a single task ends in two separate tasks - * and the local task is added to localTaskList and the remaning ones to + * and the local task is added to localTaskList and the remaining ones to * the remoteTaskList. 
*/ void diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index 662eaaf97..306698251 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" diff --git a/src/backend/distributed/executor/query_stats.c b/src/backend/distributed/executor/query_stats.c index 1ac70489c..b59777d45 100644 --- a/src/backend/distributed/executor/query_stats.c +++ b/src/backend/distributed/executor/query_stats.c @@ -15,7 +15,7 @@ #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/hash.h" #include "catalog/pg_authid.h" diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index f970cecd1..989e957af 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -11,7 +11,7 @@ #include "postgres.h" #include "distributed/commands.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/heapam.h" diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index c6a8b0a22..fa9da8b75 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" @@ -149,7 +149,7 @@ ObjectExists(const ObjectAddress *address) /* * MarkObjectDistributed marks an object as a distributed object. Marking is done * by adding appropriate entries to citus.pg_dist_object and also marking the object - * as distributed by opening a connection using current user to all of the workers + * as distributed by opening a connection using current user to all remote nodes * with metadata if object propagation is on. * * This function should be used if the user creating the given object. If you want @@ -164,7 +164,7 @@ MarkObjectDistributed(const ObjectAddress *distAddress) { char *workerPgDistObjectUpdateCommand = CreatePgDistObjectEntryCommand(distAddress); - SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand); + SendCommandToRemoteNodesWithMetadata(workerPgDistObjectUpdateCommand); } } @@ -172,7 +172,7 @@ MarkObjectDistributed(const ObjectAddress *distAddress) /* * MarkObjectDistributedViaSuperUser marks an object as a distributed object. Marking * is done by adding appropriate entries to citus.pg_dist_object and also marking the - * object as distributed by opening a connection using super user to all of the workers + * object as distributed by opening a connection using super user to all remote nodes * with metadata if object propagation is on. * * This function should be used to mark dependent object as distributed. 
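As the updated comments spell out, there are two marking paths: MarkObjectDistributed opens connections to the remote metadata nodes as the current user, while MarkObjectDistributedViaSuperUser opens them as the extension owner and is meant for dependent objects. A hedged usage sketch; the caller and both object addresses are hypothetical:

/* sketch: mark a user-created object, then one of its dependencies */
static void
MarkObjectAndDependencyDistributed(const ObjectAddress *objectAddress,
								   const ObjectAddress *dependencyAddress)
{
	/* user-created object: propagated over current-user connections */
	MarkObjectDistributed(objectAddress);

	/* dependent object: propagated over super-user (extension owner) connections */
	MarkObjectDistributedViaSuperUser(dependencyAddress);
}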
If you want @@ -187,7 +187,7 @@ MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress) { char *workerPgDistObjectUpdateCommand = CreatePgDistObjectEntryCommand(distAddress); - SendCommandToWorkersWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand); + SendCommandToRemoteNodesWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand); } } diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 85a945308..44179cffb 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -8,7 +8,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_version_compat.h" #include "stdint.h" diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 40bdae0ea..53dc7e747 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -134,7 +134,7 @@ static bool ShouldSkipMetadataChecks(void); static void EnsurePartitionMetadataIsSane(Oid relationId, char distributionMethod, int colocationId, char replicationModel, Var *distributionKey); -static void EnsureCoordinatorInitiatedOperation(void); +static void EnsureCitusInitiatedOperation(void); static void EnsureShardMetadataIsSane(Oid relationId, int64 shardId, char storageType, text *shardMinValue, text *shardMaxValue); @@ -1001,7 +1001,7 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* * Ensure given distributionArgumentIndex and colocationId values are @@ -3090,7 +3090,7 @@ citus_internal_add_partition_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); if (distributionMethod == DISTRIBUTE_BY_NONE && distributionColumnVar != NULL) { @@ -3206,7 +3206,7 @@ citus_internal_delete_partition_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } DeletePartitionRow(relationId); @@ -3254,7 +3254,7 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* * Even if the table owner is a malicious user and the shard metadata is @@ -3272,19 +3272,13 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS) /* - * EnsureCoordinatorInitiatedOperation is a helper function which ensures that - * the execution is initiated by the coordinator on a worker node. + * EnsureCitusInitiatedOperation is a helper function which ensures that + * the execution is initiated by Citus. */ static void -EnsureCoordinatorInitiatedOperation(void) +EnsureCitusInitiatedOperation(void) { - /* - * We are restricting the operation to only MX workers with the local group id - * check. The other two checks are to ensure that the operation is initiated - * by the coordinator. 
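The net effect of the hunk that follows: the old code additionally required GetLocalGroupId() != COORDINATOR_GROUP_ID, restricting these citus_internal UDFs to MX workers, while the new code only requires that a Citus-internal (or rebalancer-internal) backend issued the call, so the coordinator may execute them too. Schematically (a sketch of the two conditions, not the patched source):

/* before: must be Citus-initiated AND must not run on the coordinator */
bool allowedBefore =
	(IsCitusInternalBackend() || IsRebalancerInternalBackend()) &&
	GetLocalGroupId() != COORDINATOR_GROUP_ID;

/* after: must be Citus-initiated; any node qualifies */
bool allowedAfter =
	IsCitusInternalBackend() || IsRebalancerInternalBackend();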
- */ - if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend()) || - GetLocalGroupId() == COORDINATOR_GROUP_ID) + if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend())) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("This is an internal Citus function can only be " @@ -3465,7 +3459,7 @@ citus_internal_delete_placement_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } DeleteShardPlacementRow(placementId); @@ -3513,7 +3507,7 @@ citus_internal_add_placement_metadata_internal(int64 shardId, int64 shardLength, if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* * Even if the table owner is a malicious user, as long as the shard placements @@ -3608,7 +3602,7 @@ citus_internal_update_placement_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); if (!ShardExists(shardId)) { @@ -3672,7 +3666,7 @@ citus_internal_delete_shard_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); if (!ShardExists(shardId)) { @@ -3715,7 +3709,7 @@ citus_internal_update_relation_colocation(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* ensure that the table is in pg_dist_partition */ char partitionMethod = PartitionMethodViaCatalog(relationId); @@ -3781,7 +3775,7 @@ citus_internal_add_colocation_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } InsertColocationGroupLocally(colocationId, shardCount, replicationFactor, @@ -3806,7 +3800,7 @@ citus_internal_delete_colocation_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } DeleteColocationGroupLocally(colocationId); @@ -3885,7 +3879,7 @@ citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } UpdateNoneDistTableMetadata(relationId, replicationModel, diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index ec41e4eb2..0d9963c12 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -17,7 +17,7 @@ #include "libpq-fe.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index a73f2e9d2..041c6dcc4 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ 
b/src/backend/distributed/metadata/node_metadata.c @@ -2742,6 +2742,25 @@ EnsureCoordinator(void) } +/* + * EnsurePropagationToCoordinator checks whether the coordinator is added to the + * metadata if we're not on the coordinator. + * + * Given that metadata syncing skips syncing metadata to the coordinator, we need + * to make sure that the coordinator is added to the metadata before propagating + * a command from a worker. For this reason, today we use this only for the commands + * that we support propagating from workers. + */ +void +EnsurePropagationToCoordinator(void) +{ + if (!IsCoordinator()) + { + EnsureCoordinatorIsInMetadata(); + } +} + + /* * EnsureCoordinatorIsInMetadata checks whether the coordinator is added to the * metadata, which is required for many operations. diff --git a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c index 54f764fc1..a7f40e2ad 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c +++ b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c @@ -24,7 +24,7 @@ #include "distributed/citus_safe_lib.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/version_compat.h" #include "nodes/value.h" #include "utils/array.h" diff --git a/src/backend/distributed/operations/create_shards.c b/src/backend/distributed/operations/create_shards.c index d0fcc9612..8bc3b249f 100644 --- a/src/backend/distributed/operations/create_shards.c +++ b/src/backend/distributed/operations/create_shards.c @@ -158,13 +158,6 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, "replication factor."))); } - /* if we have enough nodes, add an extra placement attempt for backup */ - uint32 placementAttemptCount = (uint32) replicationFactor; - if (workerNodeCount > replicationFactor) - { - placementAttemptCount++; - } - /* set shard storage type according to relation type */ char shardStorageType = ShardStorageType(distributedTableId); diff --git a/src/backend/distributed/operations/delete_protocol.c b/src/backend/distributed/operations/delete_protocol.c index abed39272..54cb568be 100644 --- a/src/backend/distributed/operations/delete_protocol.c +++ b/src/backend/distributed/operations/delete_protocol.c @@ -15,7 +15,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/operations/modify_multiple_shards.c b/src/backend/distributed/operations/modify_multiple_shards.c index 8def1b26e..8d596a10b 100644 --- a/src/backend/distributed/operations/modify_multiple_shards.c +++ b/src/backend/distributed/operations/modify_multiple_shards.c @@ -14,7 +14,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "funcapi.h" #include "libpq-fe.h" diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index a3f7092d1..eeaf34321 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/operations/shard_rebalancer.c
b/src/backend/distributed/operations/shard_rebalancer.c index e3ee4aa4d..d339ac56a 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -17,7 +17,7 @@ #include -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/htup_details.h" #include "access/genam.h" diff --git a/src/backend/distributed/operations/worker_node_manager.c b/src/backend/distributed/operations/worker_node_manager.c index 76f2732ba..e616770dd 100644 --- a/src/backend/distributed/operations/worker_node_manager.c +++ b/src/backend/distributed/operations/worker_node_manager.c @@ -180,7 +180,7 @@ ActivePrimaryNodeList(LOCKMODE lockMode) /* * ActivePrimaryRemoteNodeList returns a list of all active primary nodes in - * workerNodeHash. + * workerNodeHash except the local one. */ List * ActivePrimaryRemoteNodeList(LOCKMODE lockMode) diff --git a/src/backend/distributed/planner/combine_query_planner.c b/src/backend/distributed/planner/combine_query_planner.c index e61ff8daf..6a171dac1 100644 --- a/src/backend/distributed/planner/combine_query_planner.c +++ b/src/backend/distributed/planner/combine_query_planner.c @@ -11,7 +11,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_type.h" #include "distributed/citus_ruleutils.h" diff --git a/src/backend/distributed/planner/cte_inline.c b/src/backend/distributed/planner/cte_inline.c index ce258916d..9a1bbab96 100644 --- a/src/backend/distributed/planner/cte_inline.c +++ b/src/backend/distributed/planner/cte_inline.c @@ -13,7 +13,7 @@ */ #include "postgres.h" #include "pg_version_compat.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/cte_inline.h" #include "nodes/nodeFuncs.h" diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 65278d1ea..7ad419f0a 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -9,7 +9,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "funcapi.h" @@ -702,6 +702,7 @@ DissuadePlannerFromUsingPlan(PlannedStmt *plan) * Arbitrarily high cost, but low enough that it can be added up * without overflowing by choose_custom_plan(). 
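For context on the constant assigned below: FLT_MAX is roughly 3.4e38, so FLT_MAX / 100000000 is roughly 3.4e30, enormous as a plan cost yet with enough headroom that choose_custom_plan() can add several such costs together without overflowing a double. A standalone sketch of the arithmetic:

#include <float.h>
#include <stdio.h>

int
main(void)
{
	/* the cost DissuadePlannerFromUsingPlan assigns to the plan tree */
	double dissuadedCost = FLT_MAX / 100000000;

	printf("dissuaded cost: %g, FLT_MAX: %g\n", dissuadedCost, (double) FLT_MAX);
	return 0;
}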
*/ + Assert(plan != NULL); plan->planTree->total_cost = FLT_MAX / 100000000; } diff --git a/src/backend/distributed/planner/extended_op_node_utils.c b/src/backend/distributed/planner/extended_op_node_utils.c index 0a2a8b834..bb87b6949 100644 --- a/src/backend/distributed/planner/extended_op_node_utils.c +++ b/src/backend/distributed/planner/extended_op_node_utils.c @@ -9,7 +9,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/extended_op_node_utils.h" #include "distributed/listutils.h" diff --git a/src/backend/distributed/planner/fast_path_router_planner.c b/src/backend/distributed/planner/fast_path_router_planner.c index ed256296c..1d58911eb 100644 --- a/src/backend/distributed/planner/fast_path_router_planner.c +++ b/src/backend/distributed/planner/fast_path_router_planner.c @@ -34,7 +34,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/distributed_planner.h" #include "distributed/insert_select_planner.h" diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index 2f8da29c0..bacbe16af 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" @@ -525,8 +525,16 @@ ShardPlacementForFunctionColocatedWithDistTable(DistObjectCacheEntry *procedure, if (partitionParam->paramkind == PARAM_EXTERN) { - /* Don't log a message, we should end up here again without a parameter */ - DissuadePlannerFromUsingPlan(plan); + /* + * Don't log a message, we should end up here again without a + * parameter. + * Note that "plan" can be null, for example when a CALL statement + * is prepared. 
+ */ + if (plan) + { + DissuadePlannerFromUsingPlan(plan); + } return NULL; } } diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 1b7f468f8..dd4bee90f 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" diff --git a/src/backend/distributed/planner/local_distributed_join_planner.c b/src/backend/distributed/planner/local_distributed_join_planner.c index d93921966..1867a790c 100644 --- a/src/backend/distributed/planner/local_distributed_join_planner.c +++ b/src/backend/distributed/planner/local_distributed_join_planner.c @@ -71,7 +71,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "funcapi.h" diff --git a/src/backend/distributed/planner/local_plan_cache.c b/src/backend/distributed/planner/local_plan_cache.c index 946d9fc46..1ac8e24a3 100644 --- a/src/backend/distributed/planner/local_plan_cache.c +++ b/src/backend/distributed/planner/local_plan_cache.c @@ -9,7 +9,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index 3cadea23a..5c593d153 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -29,7 +29,7 @@ #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_node_metadata.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/query_pushdown_planning.h" #include "distributed/query_colocation_checker.h" #include "distributed/repartition_executor.h" diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index 94d125f41..bf9a1871e 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -11,7 +11,7 @@ #include "libpq-fe.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/htup_details.h" #include "access/xact.h" diff --git a/src/backend/distributed/planner/multi_join_order.c b/src/backend/distributed/planner/multi_join_order.c index 7714a1e08..0eede6b9b 100644 --- a/src/backend/distributed/planner/multi_join_order.c +++ b/src/backend/distributed/planner/multi_join_order.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index 455f050a0..9001d724d 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/planner/multi_logical_planner.c b/src/backend/distributed/planner/multi_logical_planner.c index 0969e0c7c..d6897d17b 100644 --- 
a/src/backend/distributed/planner/multi_logical_planner.c +++ b/src/backend/distributed/planner/multi_logical_planner.c @@ -14,7 +14,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/heapam.h" #include "access/nbtree.h" diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index 21befa6f2..aa2c2b5b4 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include #include diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index e70de5bbd..c0930ca34 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c index 77baab197..fd1df1be9 100644 --- a/src/backend/distributed/planner/query_colocation_checker.c +++ b/src/backend/distributed/planner/query_colocation_checker.c @@ -21,7 +21,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/relation.h" #include "distributed/multi_logical_planner.h" diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c index 3bad73459..8ccc35c82 100644 --- a/src/backend/distributed/planner/query_pushdown_planning.c +++ b/src/backend/distributed/planner/query_pushdown_planning.c @@ -21,7 +21,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/citus_clauses.h" #include "distributed/citus_ruleutils.h" diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index c2426cf5f..d16280662 100644 --- a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -48,7 +48,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "funcapi.h" diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index 368ba2026..4b51a537d 100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -10,7 +10,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/colocation_utils.h" #include "distributed/distributed_planner.h" diff --git a/src/backend/distributed/planner/shard_pruning.c b/src/backend/distributed/planner/shard_pruning.c index 5375a70fa..ef244ea66 100644 --- a/src/backend/distributed/planner/shard_pruning.c +++ b/src/backend/distributed/planner/shard_pruning.c @@ -66,7 +66,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "fmgr.h" diff --git 
a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index f66e309ab..97f6fdb3d 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -15,7 +15,7 @@ #include "pgstat.h" #include "libpq-fe.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" diff --git a/src/backend/distributed/test/fake_am.c b/src/backend/distributed/test/fake_am.c index 8a723e4c4..4b11d7871 100644 --- a/src/backend/distributed/test/fake_am.c +++ b/src/backend/distributed/test/fake_am.c @@ -19,7 +19,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_version_compat.h" diff --git a/src/backend/distributed/test/fake_fdw.c b/src/backend/distributed/test/fake_fdw.c index 4784248c0..f53242f7f 100644 --- a/src/backend/distributed/test/fake_fdw.c +++ b/src/backend/distributed/test/fake_fdw.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/test/prune_shard_list.c b/src/backend/distributed/test/prune_shard_list.c index a9f5e4a88..023a759cb 100644 --- a/src/backend/distributed/test/prune_shard_list.c +++ b/src/backend/distributed/test/prune_shard_list.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index 3e2ea5ca1..c1981b77a 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" #include "unistd.h" diff --git a/src/backend/distributed/transaction/relation_access_tracking.c b/src/backend/distributed/transaction/relation_access_tracking.c index b0af4e476..3ad61ac79 100644 --- a/src/backend/distributed/transaction/relation_access_tracking.c +++ b/src/backend/distributed/transaction/relation_access_tracking.c @@ -15,7 +15,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" diff --git a/src/backend/distributed/transaction/transaction_recovery.c b/src/backend/distributed/transaction/transaction_recovery.c index b46419dc2..a833f5a46 100644 --- a/src/backend/distributed/transaction/transaction_recovery.c +++ b/src/backend/distributed/transaction/transaction_recovery.c @@ -14,7 +14,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "miscadmin.h" #include "libpq-fe.h" diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 03ecbea72..3399365aa 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -34,6 +34,12 @@ #include "utils/memutils.h" #include "utils/builtins.h" +static void SendCommandToRemoteMetadataNodesParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues); +static void 
SendBareCommandListToMetadataNodesInternal(List *commandList, + TargetWorkerSet targetWorkerSet); static void SendCommandToMetadataWorkersParams(const char *command, const char *user, int parameterCount, const Oid *parameterTypes, @@ -150,6 +156,74 @@ SendCommandListToWorkersWithMetadata(List *commands) } +/* + * SendCommandToRemoteNodesWithMetadata sends a command to remote nodes in + * parallel. Commands are committed on the nodes when the local transaction + * commits. + */ +void +SendCommandToRemoteNodesWithMetadata(const char *command) +{ + SendCommandToRemoteMetadataNodesParams(command, CurrentUserName(), + 0, NULL, NULL); +} + + +/* + * SendCommandToRemoteNodesWithMetadataViaSuperUser sends a command to remote + * nodes in parallel by opening a super user connection. Commands are committed + * on the nodes when the local transaction commits. The connections are made as + * the extension owner to ensure write access to the Citus metadata tables. + * + * Since we prevent opening superuser connections for metadata tables, using it + * is discouraged. Consider using it only for propagating pg_dist_object + * tuples for dependent objects. + */ +void +SendCommandToRemoteNodesWithMetadataViaSuperUser(const char *command) +{ + SendCommandToRemoteMetadataNodesParams(command, CitusExtensionOwnerName(), + 0, NULL, NULL); +} + + +/* + * SendCommandListToRemoteNodesWithMetadata sends all commands to remote nodes + * with the current user. See `SendCommandToRemoteNodesWithMetadata` for details. + */ +void +SendCommandListToRemoteNodesWithMetadata(List *commands) +{ + char *command = NULL; + foreach_ptr(command, commands) + { + SendCommandToRemoteNodesWithMetadata(command); + } +} + + +/* + * SendCommandToRemoteMetadataNodesParams is a wrapper around + * SendCommandToWorkersParamsInternal() that can be used to send commands + * to remote metadata nodes. + */ +static void +SendCommandToRemoteMetadataNodesParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues) +{ + /* use METADATA_NODES so that ErrorIfAnyMetadataNodeOutOfSync checks local node as well */ + List *workerNodeList = TargetWorkerSetNodeList(METADATA_NODES, + RowShareLock); + + ErrorIfAnyMetadataNodeOutOfSync(workerNodeList); + + SendCommandToWorkersParamsInternal(REMOTE_METADATA_NODES, command, user, + parameterCount, parameterTypes, parameterValues); +} + + /* * TargetWorkerSetNodeList returns a list of WorkerNode's that satisfies the * TargetWorkerSet.
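To make the dispatch in the next hunk easier to scan, this is the node-list mapping the patch establishes in TargetWorkerSetNodeList(); the condition of the first branch sits above the hunk and is not visible here:

/*
 * TargetWorkerSetNodeList() after this patch, schematically:
 *
 *   first branch (condition above the hunk)  -> ActivePrimaryNodeList()
 *   REMOTE_NODES, REMOTE_METADATA_NODES      -> ActivePrimaryRemoteNodeList()
 *   NON_COORDINATOR_NODES,
 *   NON_COORDINATOR_METADATA_NODES           -> ActivePrimaryNonCoordinatorNodeList()
 *   anything else                            -> ereport(ERROR, ...)
 *
 * Afterwards the metadata-only sets (NON_COORDINATOR_METADATA_NODES,
 * REMOTE_METADATA_NODES, METADATA_NODES) drop nodes with
 * !workerNode->hasMetadata.
 */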
@@ -162,17 +236,29 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) { workerNodeList = ActivePrimaryNodeList(lockMode); } - else + else if (targetWorkerSet == REMOTE_NODES || targetWorkerSet == REMOTE_METADATA_NODES) + { + workerNodeList = ActivePrimaryRemoteNodeList(lockMode); + } + else if (targetWorkerSet == NON_COORDINATOR_METADATA_NODES || + targetWorkerSet == NON_COORDINATOR_NODES) { workerNodeList = ActivePrimaryNonCoordinatorNodeList(lockMode); } + else + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid target worker set: %d", targetWorkerSet))); + } + List *result = NIL; WorkerNode *workerNode = NULL; foreach_ptr(workerNode, workerNodeList) { - if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || targetWorkerSet == - METADATA_NODES) && + if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || + targetWorkerSet == REMOTE_METADATA_NODES || + targetWorkerSet == METADATA_NODES) && !workerNode->hasMetadata) { continue; @@ -186,16 +272,42 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) /* - * SendBareCommandListToMetadataWorkers sends a list of commands to metadata - * workers in serial. Commands are committed immediately: new connections are - * always used and no transaction block is used (hence "bare"). The connections - * are made as the extension owner to ensure write access to the Citus metadata - * tables. Primarly useful for INDEX commands using CONCURRENTLY. + * SendBareCommandListToRemoteMetadataNodes is a wrapper around + * SendBareCommandListToMetadataNodesInternal() that can be used to send + * bare commands to remote metadata nodes. + */ +void +SendBareCommandListToRemoteMetadataNodes(List *commandList) +{ + SendBareCommandListToMetadataNodesInternal(commandList, + REMOTE_METADATA_NODES); +} + + +/* + * SendBareCommandListToMetadataWorkers is a wrapper around + * SendBareCommandListToMetadataNodesInternal() that can be used to send + * bare commands to metadata workers. */ void SendBareCommandListToMetadataWorkers(List *commandList) { - TargetWorkerSet targetWorkerSet = NON_COORDINATOR_METADATA_NODES; + SendBareCommandListToMetadataNodesInternal(commandList, + NON_COORDINATOR_METADATA_NODES); +} + + +/* + * SendBareCommandListToMetadataNodesInternal sends a list of commands to the given + * target worker set in serial. Commands are committed immediately: new connections + * are always used and no transaction block is used (hence "bare"). The connections + * are made as the extension owner to ensure write access to the Citus metadata + * tables. Primarily useful for INDEX commands using CONCURRENTLY.
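Why the "bare" path matters: statements such as CREATE INDEX ... CONCURRENTLY cannot run inside a transaction block, so they must be sent over fresh connections with no surrounding BEGIN, which is exactly what these helpers provide. A hedged usage sketch of the new entry point; the index and table names are hypothetical:

/* sketch: propagate a CONCURRENTLY index build to the remote metadata nodes */
List *commandList = list_make1(
	pstrdup("CREATE INDEX CONCURRENTLY idx_example ON example_table (value)"));
SendBareCommandListToRemoteMetadataNodes(commandList);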
+ */ +static void +SendBareCommandListToMetadataNodesInternal(List *commandList, + TargetWorkerSet targetWorkerSet) +{ List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock); char *nodeUser = CurrentUserName(); diff --git a/src/backend/distributed/utils/citus_nodefuncs.c b/src/backend/distributed/utils/citus_nodefuncs.c index aee1ff48a..0998560fe 100644 --- a/src/backend/distributed/utils/citus_nodefuncs.c +++ b/src/backend/distributed/utils/citus_nodefuncs.c @@ -10,7 +10,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "catalog/pg_type.h" #include "distributed/citus_nodes.h" diff --git a/src/backend/distributed/utils/citus_outfuncs.c b/src/backend/distributed/utils/citus_outfuncs.c index 9b4ac809c..751063789 100644 --- a/src/backend/distributed/utils/citus_outfuncs.c +++ b/src/backend/distributed/utils/citus_outfuncs.c @@ -18,7 +18,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/utils/citus_safe_lib.c b/src/backend/distributed/utils/citus_safe_lib.c index 82fa8f6f2..cbd06fc50 100644 --- a/src/backend/distributed/utils/citus_safe_lib.c +++ b/src/backend/distributed/utils/citus_safe_lib.c @@ -14,7 +14,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "safe_lib.h" diff --git a/src/backend/distributed/utils/enable_ssl.c b/src/backend/distributed/utils/enable_ssl.c index cac32f74c..35b1e0f1a 100644 --- a/src/backend/distributed/utils/enable_ssl.c +++ b/src/backend/distributed/utils/enable_ssl.c @@ -18,7 +18,7 @@ * it otherwise we get warnings about redefining this value. This needs to be * done before including libpq.h. 
*/ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/connection_management.h" #include "distributed/memutils.h" diff --git a/src/backend/distributed/utils/foreign_key_relationship.c b/src/backend/distributed/utils/foreign_key_relationship.c index d30c767df..d69d9044d 100644 --- a/src/backend/distributed/utils/foreign_key_relationship.c +++ b/src/backend/distributed/utils/foreign_key_relationship.c @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/htup_details.h" diff --git a/src/backend/distributed/utils/log_utils.c b/src/backend/distributed/utils/log_utils.c index 59a090a16..7d808591b 100644 --- a/src/backend/distributed/utils/log_utils.c +++ b/src/backend/distributed/utils/log_utils.c @@ -9,7 +9,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "utils/guc.h" #include "distributed/log_utils.h" diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c index 22a0843bd..851335abe 100644 --- a/src/backend/distributed/utils/maintenanced.c +++ b/src/backend/distributed/utils/maintenanced.c @@ -16,7 +16,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index 924ba4c54..404d792f9 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -6,7 +6,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "access/genam.h" #include "access/heapam.h" diff --git a/src/backend/distributed/utils/task_execution_utils.c b/src/backend/distributed/utils/task_execution_utils.c index 50652b6bd..7251514b5 100644 --- a/src/backend/distributed/utils/task_execution_utils.c +++ b/src/backend/distributed/utils/task_execution_utils.c @@ -6,7 +6,7 @@ #include #include -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "common/hashfn.h" diff --git a/src/include/columnar/columnar_version_compat.h b/src/include/columnar/columnar_version_compat.h index 0e0ae3112..d9b29cdb0 100644 --- a/src/include/columnar/columnar_version_compat.h +++ b/src/include/columnar/columnar_version_compat.h @@ -12,7 +12,7 @@ #ifndef COLUMNAR_COMPAT_H #define COLUMNAR_COMPAT_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if PG_VERSION_NUM >= PG_VERSION_15 #define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \ diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 34b2945ac..c474dcc43 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -10,7 +10,7 @@ #ifndef MULTI_UTILITY_H #define MULTI_UTILITY_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "postgres.h" diff --git a/src/include/distributed/distributed_planner.h b/src/include/distributed/distributed_planner.h index d46fbf2e6..bc8f5bc94 100644 --- a/src/include/distributed/distributed_planner.h +++ b/src/include/distributed/distributed_planner.h @@ -12,7 +12,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include 
"pg_version_constants.h" #include "nodes/plannodes.h" diff --git a/src/include/distributed/hash_helpers.h b/src/include/distributed/hash_helpers.h index 2b16d110c..168879b4d 100644 --- a/src/include/distributed/hash_helpers.h +++ b/src/include/distributed/hash_helpers.h @@ -11,7 +11,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "utils/hsearch.h" diff --git a/src/include/distributed/multi_physical_planner.h b/src/include/distributed/multi_physical_planner.h index 35d83eb33..6bdc95cb3 100644 --- a/src/include/distributed/multi_physical_planner.h +++ b/src/include/distributed/multi_physical_planner.h @@ -16,7 +16,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "c.h" diff --git a/src/include/distributed/recursive_planning.h b/src/include/distributed/recursive_planning.h index a883047f6..87df7fba2 100644 --- a/src/include/distributed/recursive_planning.h +++ b/src/include/distributed/recursive_planning.h @@ -10,7 +10,7 @@ #ifndef RECURSIVE_PLANNING_H #define RECURSIVE_PLANNING_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" #include "distributed/relation_restriction_equivalence.h" diff --git a/src/include/distributed/relation_utils.h b/src/include/distributed/relation_utils.h index acf84a9da..d3a5ab105 100644 --- a/src/include/distributed/relation_utils.h +++ b/src/include/distributed/relation_utils.h @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if PG_VERSION_NUM >= PG_VERSION_16 #include "parser/parse_relation.h" #endif diff --git a/src/include/distributed/worker_manager.h b/src/include/distributed/worker_manager.h index 5ad7f4962..694d38ccf 100644 --- a/src/include/distributed/worker_manager.h +++ b/src/include/distributed/worker_manager.h @@ -87,6 +87,7 @@ extern WorkerNode * FindNodeWithNodeId(int nodeId, bool missingOk); extern WorkerNode * ModifiableWorkerNode(const char *nodeName, int32 nodePort); extern List * ReadDistNode(bool includeNodesFromOtherClusters); extern void EnsureCoordinator(void); +extern void EnsurePropagationToCoordinator(void); extern void EnsureCoordinatorIsInMetadata(void); extern void InsertCoordinatorIfClusterEmpty(void); extern uint32 GroupForNode(char *nodeName, int32 nodePort); diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h index 631940edf..d622fe366 100644 --- a/src/include/distributed/worker_transaction.h +++ b/src/include/distributed/worker_transaction.h @@ -29,11 +29,22 @@ typedef enum TargetWorkerSet */ NON_COORDINATOR_METADATA_NODES, + /* + * All the active primary nodes in the metadata which have metadata + * except the local node + */ + REMOTE_METADATA_NODES, + /* * All the active primary nodes in the metadata except the coordinator */ NON_COORDINATOR_NODES, + /* + * All the active primary nodes in the metadata except the local node + */ + REMOTE_NODES, + /* * All active primary nodes in the metadata */ @@ -74,6 +85,10 @@ extern bool SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(cons extern void SendCommandToWorkersWithMetadata(const char *command); extern void SendCommandToWorkersWithMetadataViaSuperUser(const char *command); extern void SendCommandListToWorkersWithMetadata(List *commands); +extern void SendCommandToRemoteNodesWithMetadata(const char 
*command); +extern void SendCommandToRemoteNodesWithMetadataViaSuperUser(const char *command); +extern void SendCommandListToRemoteNodesWithMetadata(List *commands); +extern void SendBareCommandListToRemoteMetadataNodes(List *commandList); extern void SendBareCommandListToMetadataWorkers(List *commandList); extern void EnsureNoModificationsHaveBeenDone(void); extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName, diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index 1bdbae580..4e874e2ee 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -11,7 +11,7 @@ #ifndef PG_VERSION_COMPAT_H #define PG_VERSION_COMPAT_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if PG_VERSION_NUM >= PG_VERSION_16 diff --git a/src/include/distributed/pg_version_constants.h b/src/include/pg_version_constants.h similarity index 100% rename from src/include/distributed/pg_version_constants.h rename to src/include/pg_version_constants.h diff --git a/src/test/regress/Pipfile.lock b/src/test/regress/Pipfile.lock index 15cb7ecda..bdb42a1c3 100644 --- a/src/test/regress/Pipfile.lock +++ b/src/test/regress/Pipfile.lock @@ -127,72 +127,61 @@ }, "cffi": { "hashes": [ - "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5", - "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef", - "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104", - "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426", - "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405", - "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375", - "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a", - "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e", - "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc", - "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf", - "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185", - "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497", - "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3", - "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35", - "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c", - "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83", - "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21", - "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca", - "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984", - "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac", - "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd", - "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee", - "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a", - "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2", - "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192", - "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7", - "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585", - "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f", - "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e", - 
"sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27", - "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b", - "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e", - "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e", - "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d", - "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c", - "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415", - "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82", - "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02", - "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314", - "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325", - "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c", - "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3", - "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914", - "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045", - "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d", - "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9", - "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5", - "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2", - "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c", - "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3", - "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2", - "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8", - "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d", - "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d", - "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9", - "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162", - "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76", - "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4", - "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e", - "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9", - "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6", - "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b", - "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01", - "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0" + "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", + "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", + "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", + "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", + "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", + "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", + "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", + "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", + "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", + "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", + "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", + 
"sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", + "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", + "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", + "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", + "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", + "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", + "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", + "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", + "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", + "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", + "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", + "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", + "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", + "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", + "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", + "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", + "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", + "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", + "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", + "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", + "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", + "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", + "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", + "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", + "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", + "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", + "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", + "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", + "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", + "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", + "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", + "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", + "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", + "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", + "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", + "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", + "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", + "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", + "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", + "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", + "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" ], - "version": "==1.15.1" + "markers": "python_version >= '3.8'", + "version": "==1.16.0" }, "click": { "hashes": [ @@ -420,78 +409,78 @@ "mitmproxy": { "editable": true, "git": "https://github.com/citusdata/mitmproxy.git", - "markers": "python_version >= '3.10'", + "markers": "python_version >= '3.9'", "ref": "2fd18ef051b987925a36337ab1d61aa674353b44" }, "msgpack": { "hashes": [ - 
"sha256:00ce5f827d4f26fc094043e6f08b6069c1b148efa2631c47615ae14fb6cafc89", - "sha256:04450e4b5e1e662e7c86b6aafb7c230af9334fd0becf5e6b80459a507884241c", - "sha256:099c3d8a027367e1a6fc55d15336f04ff65c60c4f737b5739f7db4525c65fe9e", - "sha256:102cfb54eaefa73e8ca1e784b9352c623524185c98e057e519545131a56fb0af", - "sha256:14db7e1b7a7ed362b2f94897bf2486c899c8bb50f6e34b2db92fe534cdab306f", - "sha256:159cfec18a6e125dd4723e2b1de6f202b34b87c850fb9d509acfd054c01135e9", - "sha256:1dc67b40fe81217b308ab12651adba05e7300b3a2ccf84d6b35a878e308dd8d4", - "sha256:1f0e36a5fa7a182cde391a128a64f437657d2b9371dfa42eda3436245adccbf5", - "sha256:229ccb6713c8b941eaa5cf13dc7478eba117f21513b5893c35e44483e2f0c9c8", - "sha256:25d3746da40f3c8c59c3b1d001e49fd2aa17904438f980d9a391370366df001e", - "sha256:32c0aff31f33033f4961abc01f78497e5e07bac02a508632aef394b384d27428", - "sha256:33bbf47ea5a6ff20c23426106e81863cdbb5402de1825493026ce615039cc99d", - "sha256:35ad5aed9b52217d4cea739d0ea3a492a18dd86fecb4b132668a69f27fb0363b", - "sha256:3910211b0ab20be3a38e0bb944ed45bd4265d8d9f11a3d1674b95b298e08dd5c", - "sha256:3b5658b1f9e486a2eec4c0c688f213a90085b9cf2fec76ef08f98fdf6c62f4b9", - "sha256:40b801b768f5a765e33c68f30665d3c6ee1c8623a2d2bb78e6e59f2db4e4ceb7", - "sha256:47275ff73005a3e5e146e50baa2378e1730cba6e292f0222bc496a8e4c4adfc8", - "sha256:55bb4a1bf94e39447bc08238a2fb8a767460388a8192f67c103442eb36920887", - "sha256:5b08676a17e3f791daad34d5fcb18479e9c85e7200d5a17cbe8de798643a7e37", - "sha256:5b16344032a27b2ccfd341f89dadf3e4ef6407d91e4b93563c14644a8abb3ad7", - "sha256:5c5e05e4f5756758c58a8088aa10dc70d851c89f842b611fdccfc0581c1846bc", - "sha256:5cd67674db3c73026e0a2c729b909780e88bd9cbc8184256f9567640a5d299a8", - "sha256:5e7fae9ca93258a956551708cf60dc6c8145574e32ce8c8c4d894e63bcb04341", - "sha256:61213482b5a387ead9e250e9e3cb290292feca39dc83b41c3b1b7b8ffc8d8ecb", - "sha256:619a63753ba9e792fe3c6c0fc2b9ee2cfbd92153dd91bee029a89a71eb2942cd", - "sha256:652e4b7497825b0af6259e2c54700e6dc33d2fc4ed92b8839435090d4c9cc911", - "sha256:68569509dd015fcdd1e6b2b3ccc8c51fd27d9a97f461ccc909270e220ee09685", - "sha256:6a01a072b2219b65a6ff74df208f20b2cac9401c60adb676ee34e53b4c651077", - "sha256:70843788c85ca385846a2d2f836efebe7bb2687ca0734648bf5c9dc6c55602d2", - "sha256:76820f2ece3b0a7c948bbb6a599020e29574626d23a649476def023cbb026787", - "sha256:7a006c300e82402c0c8f1ded11352a3ba2a61b87e7abb3054c845af2ca8d553c", - "sha256:7baf16fd8908a025c4a8d7b699103e72d41f967e2aee5a2065432bcdbd9fd06e", - "sha256:7ecf431786019a7bfedc28281531d706627f603e3691d64eccdbce3ecd353823", - "sha256:885de1ed5ea01c1bfe0a34c901152a264c3c1f8f1d382042b92ea354bd14bb0e", - "sha256:88cdb1da7fdb121dbb3116910722f5acab4d6e8bfcacab8fafe27e2e7744dc6a", - "sha256:95ade0bd4cf69e04e8b8f8ec2d197d9c9c4a9b6902e048dc7456bf6d82e12a80", - "sha256:9b88dc97ba86c96b964c3745a445d9a65f76fe21955a953064fe04adb63e9367", - "sha256:9c780d992f5d734432726b92a0c87bf1857c3d85082a8dea29cbf56e44a132b3", - "sha256:9f85200ea102276afdd3749ca94747f057bbb868d1c52921ee2446730b508d0f", - "sha256:a1cf98afa7ad5e7012454ca3fde254499a13f9d92fd50cb46118118a249a1355", - "sha256:a635aecf1047255576dbb0927cbf9a7aa4a68e9d54110cc3c926652d18f144e0", - "sha256:ae97504958d0bc58c1152045c170815d5c4f8af906561ce044b6358b43d0c97e", - "sha256:b06a5095a79384760625b5de3f83f40b3053a385fb893be8a106fbbd84c14980", - "sha256:b5c8dd9a386a66e50bd7fa22b7a49fb8ead2b3574d6bd69eb1caced6caea0803", - "sha256:bae6c561f11b444b258b1b4be2bdd1e1cf93cd1d80766b7e869a79db4543a8a8", - "sha256:bbb4448a05d261fae423d5c0b0974ad899f60825bc77eabad5a0c518e78448c2", - 
"sha256:bd6af61388be65a8701f5787362cb54adae20007e0cc67ca9221a4b95115583b", - "sha256:bf652839d16de91fe1cfb253e0a88db9a548796939533894e07f45d4bdf90a5f", - "sha256:d6d25b8a5c70e2334ed61a8da4c11cd9b97c6fbd980c406033f06e4463fda006", - "sha256:da057d3652e698b00746e47f06dbb513314f847421e857e32e1dc61c46f6c052", - "sha256:e0ed35d6d6122d0baa9a1b59ebca4ee302139f4cfb57dab85e4c73ab793ae7ed", - "sha256:e36560d001d4ba469d469b02037f2dd404421fd72277d9474efe9f03f83fced5", - "sha256:f4321692e7f299277e55f322329b2c972d93bb612d85f3fda8741bec5c6285ce", - "sha256:f75114c05ec56566da6b55122791cf5bb53d5aada96a98c016d6231e03132f76", - "sha256:fb4571efe86545b772a4630fee578c213c91cbcfd20347806e47fd4e782a18fe", - "sha256:fc97aa4b4fb928ff4d3b74da7c30b360d0cb3ede49a5a6e1fd9705f49aea1deb" + "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862", + "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d", + "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3", + "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672", + "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0", + "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9", + "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee", + "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46", + "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524", + "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819", + "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc", + "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc", + "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1", + "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82", + "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81", + "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6", + "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d", + "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2", + "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c", + "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87", + "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84", + "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e", + "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95", + "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f", + "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b", + "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93", + "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf", + "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61", + "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c", + "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8", + "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d", + "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c", + "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4", + "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba", + "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415", + "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee", + 
"sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d", + "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9", + "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075", + "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f", + "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7", + "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681", + "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329", + "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1", + "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf", + "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c", + "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5", + "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b", + "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5", + "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e", + "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b", + "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad", + "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd", + "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7", + "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002", + "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc" ], "markers": "python_version >= '3.8'", - "version": "==1.0.6" + "version": "==1.0.7" }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], "markers": "python_version >= '3.7'", - "version": "==23.1" + "version": "==23.2" }, "passlib": { "hashes": [ @@ -698,6 +687,62 @@ "markers": "python_version >= '3'", "version": "==0.17.16" }, + "ruamel.yaml.clib": { + "hashes": [ + "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d", + "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", + "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", + "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", + "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", + "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", + "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", + "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", + "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", + "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", + "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", + "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", + "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", + "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", + 
"sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", + "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", + "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", + "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", + "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", + "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", + "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", + "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", + "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", + "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", + "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", + "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", + "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", + "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", + "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", + "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", + "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", + "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb", + "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942", + "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", + "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", + "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", + "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", + "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", + "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", + "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", + "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1", + "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2", + "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875", + "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412" + ], + "markers": "python_version < '3.10' and platform_python_implementation == 'CPython'", + "version": "==0.2.8" + }, "sortedcontainers": { "hashes": [ "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", @@ -746,11 +791,12 @@ }, "werkzeug": { "hashes": [ - "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", - "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" + "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", + "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" ], + "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==2.3.7" + "version": "==3.0.1" }, "wsproto": { "hashes": [ @@ -906,11 +952,11 @@ }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + 
"sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], "markers": "python_version >= '3.7'", - "version": "==23.1" + "version": "==23.2" }, "pathspec": { "hashes": [ @@ -922,19 +968,19 @@ }, "platformdirs": { "hashes": [ - "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d", - "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d" + "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", + "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" ], "markers": "python_version >= '3.7'", - "version": "==3.10.0" + "version": "==3.11.0" }, "pycodestyle": { "hashes": [ - "sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0", - "sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8" + "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f", + "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67" ], "markers": "python_version >= '3.8'", - "version": "==2.11.0" + "version": "==2.11.1" }, "pyflakes": { "hashes": [ diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py index 53c9c7944..40c727189 100644 --- a/src/test/regress/citus_tests/common.py +++ b/src/test/regress/citus_tests/common.py @@ -581,6 +581,14 @@ class QueryRunner(ABC): with self.cur(**kwargs) as cur: cur.execute(query, params=params) + def sql_prepared(self, query, params=None, **kwargs): + """Run an SQL query, with prepare=True + + This opens a new connection and closes it once the query is done + """ + with self.cur(**kwargs) as cur: + cur.execute(query, params=params, prepare=True) + def sql_row(self, query, params=None, allow_empty_result=False, **kwargs): """Run an SQL query that returns a single row and returns this row diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index b28341e5c..b902a7998 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -125,7 +125,6 @@ DEPS = { "multi_mx_create_table": TestDeps( None, [ - "multi_test_helpers_superuser", "multi_mx_node_metadata", "multi_cluster_management", "multi_mx_function_table_reference", @@ -176,6 +175,42 @@ DEPS = { "grant_on_schema_propagation": TestDeps("minimal_schedule"), "propagate_extension_commands": TestDeps("minimal_schedule"), "multi_size_queries": TestDeps("base_schedule", ["multi_copy"]), + "multi_mx_node_metadata": TestDeps( + None, + [ + "multi_extension", + "multi_test_helpers", + "multi_test_helpers_superuser", + ], + ), + "multi_mx_function_table_reference": TestDeps( + None, + [ + "multi_cluster_management", + "remove_coordinator_from_metadata", + ], + # because it queries node group id and it changes as we add / remove nodes + repeatable=False, + ), + "multi_mx_add_coordinator": TestDeps( + None, + [ + "multi_cluster_management", + "remove_coordinator_from_metadata", + "multi_mx_function_table_reference", + ], + ), + "metadata_sync_helpers": TestDeps( + None, + [ + "multi_mx_node_metadata", + "multi_cluster_management", + ], + ), + "multi_utilities": TestDeps( + "minimal_schedule", + ["multi_data_types"], + ), } diff --git a/src/test/regress/citus_tests/test/test_prepared_statements.py b/src/test/regress/citus_tests/test/test_prepared_statements.py new file mode 100644 index 000000000..761ecc30c --- /dev/null +++ b/src/test/regress/citus_tests/test/test_prepared_statements.py @@ -0,0 +1,30 @@ +def test_call_param(cluster): + # create a distributed table 
and an associated distributed procedure + # to ensure a parameterized CALL succeeds, even when the param is the + # distribution key. + coord = cluster.coordinator + coord.sql("CREATE TABLE test(i int)") + coord.sql( + """ + CREATE PROCEDURE p(_i INT) LANGUAGE plpgsql AS $$ + BEGIN + INSERT INTO test(i) VALUES (_i); + END; $$ + """ + ) + sql = "CALL p(%s)" + + # prepare/exec before distributing + coord.sql_prepared(sql, (1,)) + + coord.sql("SELECT create_distributed_table('test', 'i')") + coord.sql( + "SELECT create_distributed_function('p(int)', distribution_arg_name := '_i', colocate_with := 'test')" + ) + + # prepare/exec after distribution + coord.sql_prepared(sql, (2,)) + + sum_i = coord.sql_value("select sum(i) from test;") + + assert sum_i == 3 diff --git a/src/test/regress/expected/create_role_propagation.out b/src/test/regress/expected/create_role_propagation.out index 59f7948a1..48310bdc3 100644 --- a/src/test/regress/expected/create_role_propagation.out +++ b/src/test/regress/expected/create_role_propagation.out @@ -40,18 +40,10 @@ SELECT master_remove_node('localhost', :worker_2_port); CREATE ROLE create_role_with_everything SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 105 PASSWORD 'strong_password123^' VALID UNTIL '2045-05-05 00:00:00.00+00' IN ROLE create_role, create_group ROLE create_user, create_group_2 ADMIN create_role_2, create_user_2; CREATE ROLE create_role_with_nothing NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 3 PASSWORD 'weakpassword' VALID UNTIL '2015-05-05 00:00:00.00+00'; --- show that creating role from worker node is only allowed when create role --- propagation is off +-- show that creating role from worker node is allowed \c - - - :worker_1_port CREATE ROLE role_on_worker; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -BEGIN; -SET citus.enable_create_role_propagation TO off; -CREATE ROLE role_on_worker; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. -ROLLBACK; +DROP ROLE role_on_worker; \c - - - :master_port -- edge case role names CREATE ROLE "create_role'edge"; @@ -217,17 +209,17 @@ CREATE ROLE dist_role_3; CREATE ROLE dist_role_4; SET citus.enable_create_role_propagation TO OFF; CREATE ROLE non_dist_role_1 SUPERUSER; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE non_dist_role_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE non_dist_role_3; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
CREATE ROLE non_dist_role_4; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. SET citus.enable_create_role_propagation TO ON; SET ROLE dist_role_1; GRANT non_dist_role_1 TO non_dist_role_2; @@ -307,11 +299,11 @@ CREATE ROLE dist_mixed_3; CREATE ROLE dist_mixed_4; SET citus.enable_create_role_propagation TO OFF; CREATE ROLE nondist_mixed_1; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE nondist_mixed_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; role | member | grantor | admin_option --------------------------------------------------------------------- @@ -506,14 +498,14 @@ SELECT rolname, rolcanlogin FROM pg_authid WHERE rolname = 'create_role' OR roln -- test cascading grants SET citus.enable_create_role_propagation TO OFF; CREATE ROLE nondist_cascade_1; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE nondist_cascade_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE nondist_cascade_3; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
SET citus.enable_create_role_propagation TO ON; CREATE ROLE dist_cascade; GRANT nondist_cascade_1 TO nondist_cascade_2; @@ -696,3 +688,4 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; (0 rows) \c - - - :master_port +DROP ROLE nondist_cascade_1, nondist_cascade_2, nondist_cascade_3, dist_cascade; diff --git a/src/test/regress/expected/distributed_domain.out b/src/test/regress/expected/distributed_domain.out index 30e388803..6fdb348eb 100644 --- a/src/test/regress/expected/distributed_domain.out +++ b/src/test/regress/expected/distributed_domain.out @@ -680,16 +680,7 @@ SELECT * FROM use_age_invalid ORDER BY 1; -- verify we can validate a constraint that is already validated, can happen when we add a node while a domain constraint was not validated ALTER DOMAIN age_invalid VALIDATE CONSTRAINT check_age_positive; -- test changing the owner of a domain -SET client_min_messages TO error; -SELECT 1 FROM run_command_on_workers($$ CREATE ROLE domain_owner; $$); - ?column? ---------------------------------------------------------------------- - 1 - 1 -(2 rows) - CREATE ROLE domain_owner; -RESET client_min_messages; CREATE DOMAIN alter_domain_owner AS int; ALTER DOMAIN alter_domain_owner OWNER TO domain_owner; SELECT u.rolname diff --git a/src/test/regress/expected/failure_distributed_results.out b/src/test/regress/expected/failure_distributed_results.out index fc97c9af6..a316763e3 100644 --- a/src/test/regress/expected/failure_distributed_results.out +++ b/src/test/regress/expected/failure_distributed_results.out @@ -14,6 +14,8 @@ SELECT citus.mitmproxy('conn.allow()'); (1 row) SET citus.next_shard_id TO 100800; +-- Needed because of issue #7306 +SET citus.force_max_query_parallelization TO true; -- always try the 1st replica before the 2nd replica. 
SET citus.task_assignment_policy TO 'first-replica'; -- diff --git a/src/test/regress/expected/insert_select_connection_leak.out b/src/test/regress/expected/insert_select_connection_leak.out index 8a983acd5..b342ecde1 100644 --- a/src/test/regress/expected/insert_select_connection_leak.out +++ b/src/test/regress/expected/insert_select_connection_leak.out @@ -47,16 +47,16 @@ INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 (1 row) END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 @@ -67,8 +67,8 @@ BEGIN; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; ROLLBACK; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 @@ -84,16 +84,16 @@ SAVEPOINT s1; INSERT INTO target_table SELECT a, CASE WHEN a < 50 THEN b ELSE null END FROM source_table; ERROR: null value in column "b" violates not-null constraint ROLLBACK TO SAVEPOINT s1; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 (1 row) END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, 
worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out index a9739a826..73610a455 100644 --- a/src/test/regress/expected/isolation_get_all_active_transactions.out +++ b/src/test/regress/expected/isolation_get_all_active_transactions.out @@ -94,7 +94,7 @@ step s2-commit: COMMIT; -starting permutation: s4-record-pid s3-show-activity s5-kill s3-show-activity +starting permutation: s4-record-pid s3-show-activity s5-kill s3-wait-backend-termination step s4-record-pid: SELECT pg_backend_pid() INTO selected_pid; @@ -115,12 +115,22 @@ pg_terminate_backend t (1 row) -step s3-show-activity: +step s3-wait-backend-termination: SET ROLE postgres; - select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid); - -count ---------------------------------------------------------------------- - 0 -(1 row) + DO $$ + DECLARE + i int; + BEGIN + i := 0; + -- try for 5 sec then timeout + WHILE (select count(*) > 0 from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid)) + LOOP + PERFORM pg_sleep(0.1); + i := i + 1; + IF i > 50 THEN + RAISE EXCEPTION 'Timeout while waiting for backend to terminate'; + END IF; + END LOOP; + END; + $$; diff --git a/src/test/regress/expected/issue_5763.out b/src/test/regress/expected/issue_5763.out index aa6c4f35b..864297397 100644 --- a/src/test/regress/expected/issue_5763.out +++ b/src/test/regress/expected/issue_5763.out @@ -28,8 +28,8 @@ DROP USER issue_5763_3; -- test non-distributed role SET citus.enable_create_role_propagation TO off; CREATE USER issue_5763_4 WITH SUPERUSER; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
\c - issue_5763_4 - :master_port set citus.enable_ddl_propagation = off; CREATE SCHEMA issue_5763_sc_4; diff --git a/src/test/regress/expected/logical_replication.out b/src/test/regress/expected/logical_replication.out index 8a3e96da9..b5a36125a 100644 --- a/src/test/regress/expected/logical_replication.out +++ b/src/test/regress/expected/logical_replication.out @@ -32,23 +32,21 @@ CREATE SUBSCRIPTION citus_shard_move_subscription_:postgres_oid PUBLICATION citus_shard_move_publication_:postgres_oid WITH (enabled=false, slot_name=citus_shard_move_slot_:postgres_oid); NOTICE: created replication slot "citus_shard_move_slot_10" on publisher -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 1 + citus_shard_move_subscription_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) SELECT count(*) FROM dist; count @@ -58,22 +56,21 @@ SELECT count(*) FROM dist; \c - - - :worker_1_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 0 +(0 rows) + +SELECT pubname from pg_publication; + pubname +--------------------------------------------------------------------- + citus_shard_move_publication_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 1 -(1 row) - -SELECT count(*) from pg_replication_slots; - count ---------------------------------------------------------------------- - 1 + citus_shard_move_slot_10 (1 row) SELECT count(*) FROM dist; @@ -90,25 +87,29 @@ select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localho (1 row) +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + -- the subscription is still there, as there is no cleanup record for it -- we have created it manually -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 1 + citus_shard_move_subscription_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) SELECT count(*) from dist; count @@ -120,22 +121,21 @@ SELECT count(*) from dist; SET search_path TO logical_replication; -- the publication and repslot are still there, as there are no cleanup records for them -- we have created them manually -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 0 +(0 rows) + +SELECT pubname from pg_publication; + pubname 
+--------------------------------------------------------------------- + citus_shard_move_publication_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 1 -(1 row) - -SELECT count(*) from pg_replication_slots; - count ---------------------------------------------------------------------- - 1 + citus_shard_move_slot_10 (1 row) SELECT count(*) from dist; @@ -153,23 +153,20 @@ SELECT pg_drop_replication_slot('citus_shard_move_slot_' || :postgres_oid); \c - - - :worker_2_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) SELECT count(*) from dist; count diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 29d62c46a..a41ac9d5f 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -27,8 +27,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; --- in a distributed transaction and the application name is Citus --- but we are on the coordinator, so still not allowed +-- in a distributed transaction and the application name is Citus, allowed. BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id @@ -38,7 +37,11 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); -ERROR: This is an internal Citus function can only be used in a distributed transaction + citus_internal_add_partition_metadata +--------------------------------------------------------------------- + +(1 row) + ROLLBACK; \c - postgres - \c - - - :worker_1_port diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index e810b715e..42bcd6647 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -70,38 +70,43 @@ SELECT create_reference_table('ref'); (1 row) \c - - - :worker_1_port --- alter role from mx worker isn't allowed when alter role propagation is on -SET citus.enable_alter_role_propagation TO ON; -ALTER ROLE reprefuser WITH CREATEROLE; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. 
--- to alter role locally disable alter role propagation first +-- to alter role locally, disable alter role propagation first SET citus.enable_alter_role_propagation TO OFF; ALTER ROLE reprefuser WITH CREATEROLE; -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; - rolcreatedb | rolcreaterole +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; + result --------------------------------------------------------------------- - t | t -(1 row) + {"rolcreatedb": true, "rolcreaterole": false} + {"rolcreatedb": true, "rolcreaterole": false} + {"rolcreatedb": true, "rolcreaterole": true} +(3 rows) -RESET citus.enable_alter_role_propagation; -\c - - - :worker_2_port --- show that altering role locally on worker doesn't propagated to other worker -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; - rolcreatedb | rolcreaterole +-- alter role from mx worker is allowed +SET citus.enable_alter_role_propagation TO ON; +ALTER ROLE reprefuser WITH CREATEROLE; +-- show that altering role locally on worker is propagated to coordinator and to other workers too +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; + result --------------------------------------------------------------------- - t | f -(1 row) + {"rolcreatedb": true, "rolcreaterole": true} + {"rolcreatedb": true, "rolcreaterole": true} + {"rolcreatedb": true, "rolcreaterole": true} +(3 rows) \c - - - :master_port SET search_path TO mx_add_coordinator,public; --- show that altering role locally on worker doesn't propagated to coordinator -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; - rolcreatedb | rolcreaterole ---------------------------------------------------------------------- - t | f -(1 row) - SET citus.log_local_commands TO ON; SET client_min_messages TO DEBUG; -- if the placement policy is not round-robin, SELECTs on the reference @@ -124,7 +129,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM mx_add_coo 0 (1 row) --- test that distributed functions also use local execution +-- test that distributed functions also use sequential execution CREATE OR REPLACE FUNCTION my_group_id() RETURNS void LANGUAGE plpgsql @@ -365,5 +370,6 @@ SELECT verify_metadata('localhost', :worker_1_port), SET client_min_messages TO error; DROP SCHEMA mx_add_coordinator CASCADE; +DROP USER reprefuser; SET search_path TO DEFAULT; RESET client_min_messages; diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index ac7f90826..b9d3f7faa 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -3,6 +3,7 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000; +SET client_min_messages TO WARNING; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -15,6 +16,9 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port); (1 row) +-- cannot drop them at the end of the test file as other tests depend on them +DROP SCHEMA IF EXISTS citus_mx_test_schema, 
citus_mx_test_schema_join_1, citus_mx_test_schema_join_2 CASCADE; +DROP TABLE IF EXISTS nation_hash, lineitem_mx, orders_mx, customer_mx, nation_mx, part_mx, supplier_mx, mx_ddl_table, limit_orders_mx, multiple_hash_mx, app_analytics_events_mx, researchers_mx, labs_mx, objects_mx, articles_hash_mx, articles_single_shard_hash_mx, company_employees_mx; -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; @@ -42,7 +46,7 @@ BEGIN END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) +CREATE OR REPLACE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, @@ -65,14 +69,16 @@ SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset \endif CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale); CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); -CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); +CREATE TYPE citus_mx_test_schema.order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 1 \c - - - :worker_1_port +SET client_min_messages TO WARNING; -- show that we do not support creating citus local tables from mx workers for now CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. +DROP TABLE citus_local_table; SET search_path TO citus_mx_test_schema; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( @@ -85,6 +91,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( ); -- now create required stuff in the worker 2 \c - - - :worker_2_port +SET client_min_messages TO WARNING; SET search_path TO citus_mx_test_schema; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( @@ -97,6 +104,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( ); -- connect back to the master, and do some more tests \c - - - :master_port +SET client_min_messages TO WARNING; SET citus.shard_replication_factor TO 1; SET search_path TO public; CREATE TABLE nation_hash( @@ -315,7 +323,7 @@ CREATE TABLE limit_orders_mx ( symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, - kind order_side_mx NOT NULL, + kind citus_mx_test_schema.order_side_mx NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) ); SET citus.shard_count TO 2; @@ -473,6 +481,7 @@ ORDER BY table_name::text; (23 rows) \c - - - :worker_1_port +SET client_min_messages TO WARNING; SELECT table_name, citus_table_type, distribution_column, shard_count, table_owner FROM citus_tables ORDER BY table_name::text; @@ -978,6 +987,6 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR (469 rows) -- Show that altering type name is not supported from worker node -ALTER TYPE order_side_mx RENAME TO temp_order_side_mx; +ALTER TYPE citus_mx_test_schema.order_side_mx RENAME TO temp_order_side_mx; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition.out b/src/test/regress/expected/multi_mx_insert_select_repartition.out index 62f197c30..a3912ec8e 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out index 15deba0c0..62271f9a7 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index 707dcc472..6a152b515 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -9,7 +9,7 @@ SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 1; \set VERBOSITY terse -- Simulates a readonly node by setting default_transaction_read_only. 
-CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) +CREATE OR REPLACE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) RETURNS TEXT LANGUAGE sql AS $$ @@ -27,7 +27,7 @@ CREATE OR REPLACE FUNCTION raise_error_in_metadata_sync() RETURNS void LANGUAGE C STRICT AS 'citus'; -CREATE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ +CREATE OR REPLACE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ declare counter integer := -1; begin @@ -846,7 +846,22 @@ SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; db_to_drop (1 row) -DROP DATABASE db_to_drop; +DO $$ +DECLARE + i int := 0; +BEGIN + WHILE NOT (SELECT bool_and(success) from run_command_on_all_nodes('DROP DATABASE IF EXISTS db_to_drop')) + LOOP + BEGIN + i := i + 1; + IF i > 5 THEN + RAISE EXCEPTION 'DROP DATABASE timed out'; + END IF; + PERFORM pg_sleep(1); + END; + END LOOP; +END; +$$; SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; datname --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out index b82e54f16..d2b0940ed 100644 --- a/src/test/regress/expected/multi_utilities.out +++ b/src/test/regress/expected/multi_utilities.out @@ -348,6 +348,8 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM local_vacuum_table; +VACUUM local_vacuum_table; +VACUUM local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 25000000 THEN 22500000 ELSE s END FROM pg_total_relation_size('local_vacuum_table') s ; s @@ -401,6 +403,8 @@ VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 50000000 AND 70000000 THEN 60000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; size @@ -411,6 +415,8 @@ FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; size @@ -422,10 +428,14 @@ FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset SELECT :size1 > :size2 as truncate_less_size; truncate_less_size diff --git 
a/src/test/regress/expected/role_command_from_any_node.out b/src/test/regress/expected/role_command_from_any_node.out new file mode 100644 index 000000000..e8700a204 --- /dev/null +++ b/src/test/regress/expected/role_command_from_any_node.out @@ -0,0 +1,274 @@ +-- idempotently remove the coordinator from metadata +SELECT COUNT(citus_remove_node(nodename, nodeport)) >= 0 FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :master_port; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +-- make sure that CREATE ROLE from workers is not supported when coordinator is not added to metadata +SELECT result FROM run_command_on_workers('CREATE ROLE test_role'); + result +--------------------------------------------------------------------- + ERROR: coordinator is not added to the metadata + ERROR: coordinator is not added to the metadata +(2 rows) + +\c - - - :master_port +CREATE SCHEMA role_command_from_any_node; +SET search_path TO role_command_from_any_node; +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +CREATE OR REPLACE FUNCTION check_role_on_all_nodes(p_role_name text) +RETURNS TABLE (node_type text, result text) +AS $func$ +DECLARE + v_worker_query text; +BEGIN + v_worker_query := format( + $$ + SELECT to_jsonb(q1.*) FROM ( + SELECT + ( + SELECT COUNT(*) = 1 FROM pg_roles WHERE rolname = '%s' + ) AS role_exists, + ( + SELECT to_jsonb(q.*) FROM (SELECT * FROM pg_roles WHERE rolname = '%s') q + ) AS role_properties, + ( + SELECT COUNT(*) = 1 + FROM pg_dist_object + WHERE objid = (SELECT oid FROM pg_roles WHERE rolname = '%s') + ) AS pg_dist_object_record_for_role_exists, + ( + SELECT COUNT(*) > 0 + FROM pg_dist_object + WHERE classid = 1260 AND objid NOT IN (SELECT oid FROM pg_roles) + ) AS stale_pg_dist_object_record_for_a_role_exists + ) q1 + $$, + p_role_name, p_role_name, p_role_name + ); + + RETURN QUERY + SELECT + CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)' + WHEN (groupid = 0) THEN 'coordinator (remote)' + WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)' + ELSE 'worker node (remote)' + END AS node_type, + q2.result + FROM run_command_on_all_nodes(v_worker_query) q2 + JOIN pg_dist_node USING (nodeid); +END; +$func$ LANGUAGE plpgsql; +\c - - - :worker_1_port +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; +SET citus.enable_create_role_propagation TO OFF; +CREATE ROLE test_role; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
+SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +CREATE ROLE test_role; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +SET citus.enable_create_role_propagation TO ON; +-- doesn't fail even if the role doesn't exist on other nodes +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, 
"pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +CREATE ROLE test_role; +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +ALTER ROLE test_role_renamed RENAME TO test_role; +SET citus.enable_alter_role_propagation TO ON; +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE test_role_renamed CREATEDB; +SET citus.enable_alter_role_propagation TO ON; +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | false + worker node (local) | true + worker node (remote) | false +(3 rows) + +ALTER ROLE test_role_renamed CREATEDB; +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | true + worker node (local) | true + worker node (remote) | true +(3 rows) + +SET citus.enable_alter_role_set_propagation TO ON; +ALTER ROLE 
current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + result +--------------------------------------------------------------------- + off + off + off +(3 rows) + +SET citus.enable_alter_role_set_propagation TO OFF; +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO ON; +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + result +--------------------------------------------------------------------- + off + off + on +(3 rows) + +SET citus.enable_alter_role_set_propagation TO ON; +ALTER ROLE current_user IN DATABASE "regression" RESET enable_hashjoin; +CREATE ROLE another_user; +SET citus.enable_create_role_propagation TO OFF; +GRANT another_user TO test_role_renamed; +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + result +--------------------------------------------------------------------- + f + f + t +(3 rows) + +SET citus.enable_create_role_propagation TO ON; +SET client_min_messages TO ERROR; +GRANT another_user TO test_role_renamed; +SET client_min_messages TO NOTICE; +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +\c - - - :master_port +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; +SELECT citus_remove_node('localhost', :worker_1_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +SELECT 1 FROM citus_add_node('localhost', :worker_1_port); + ?column? 
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+-- make sure that citus_add_node() propagates the roles created via a worker
+SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false}
+ worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false}
+ worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false}
+(3 rows)
+
+SELECT citus_remove_node('localhost', :master_port);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+\c - - - :worker_1_port
+-- they fail because the coordinator is not added to metadata
+DROP ROLE test_role_renamed;
+ERROR: coordinator is not added to the metadata
+HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
+ALTER ROLE test_role_renamed RENAME TO test_role;
+ERROR: coordinator is not added to the metadata
+HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
+ALTER ROLE test_role_renamed CREATEDB;
+ERROR: coordinator is not added to the metadata
+HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
+ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF;
+ERROR: coordinator is not added to the metadata
+HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
+GRANT another_user TO test_role_renamed;
+ERROR: coordinator is not added to the metadata
+HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
+\c - - - :master_port
+DROP ROLE test_role_renamed, another_user;
+SET client_min_messages TO WARNING;
+DROP SCHEMA role_command_from_any_node CASCADE;
diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out
index f5b76c14c..2c399f24a 100644
--- a/src/test/regress/expected/shard_rebalancer.out
+++ b/src/test/regress/expected/shard_rebalancer.out
@@ -328,8 +328,8 @@ RESET citus.shard_replication_factor;
 -- test some more error handling. We create them later there.
 SET citus.enable_create_role_propagation TO OFF;
 CREATE USER testrole;
-NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
-HINT: Connect to worker nodes directly to manually create all necessary users and roles.
+NOTICE: not propagating CREATE ROLE/USER commands to other nodes
+HINT: Connect to other nodes directly to manually create all necessary users and roles.
 GRANT ALL ON SCHEMA public TO testrole;
 ERROR: role "testrole" does not exist
 CONTEXT: while executing command on localhost:xxxxx
@@ -731,8 +731,8 @@ ERROR: target node localhost:xxxxx is not responsive
 \c - - - :worker_1_port
 SET citus.enable_create_role_propagation TO OFF;
 CREATE USER testrole;
-NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
-HINT: Connect to worker nodes directly to manually create all necessary users and roles.
+NOTICE: not propagating CREATE ROLE/USER commands to other nodes
+HINT: Connect to other nodes directly to manually create all necessary users and roles.
GRANT ALL ON SCHEMA public TO testrole; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. @@ -745,8 +745,8 @@ ERROR: source node localhost:xxxxx is not responsive \c - - - :worker_2_port SET citus.enable_create_role_propagation TO OFF; CREATE USER testrole; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. GRANT ALL ON SCHEMA public TO testrole; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. diff --git a/src/test/regress/expected/text_search.out b/src/test/regress/expected/text_search.out index b9934a1d4..6c5b387ba 100644 --- a/src/test/regress/expected/text_search.out +++ b/src/test/regress/expected/text_search.out @@ -374,12 +374,21 @@ SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; (2 rows) -- verify they are all removed locally -SELECT 'text_search.config1'::regconfig; -ERROR: text search configuration "text_search.config1" does not exist -SELECT 'text_search.config2'::regconfig; -ERROR: text search configuration "text_search.config2" does not exist -SELECT 'text_search.config3'::regconfig; -ERROR: text search configuration "text_search.config3" does not exist +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config1' AND cfgnamespace = 'text_search'::regnamespace; + ?column? +--------------------------------------------------------------------- +(0 rows) + +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config2' AND cfgnamespace = 'text_search'::regnamespace; + ?column? +--------------------------------------------------------------------- +(0 rows) + +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config3' AND cfgnamespace = 'text_search'::regnamespace; + ?column? 
+--------------------------------------------------------------------- +(0 rows) + -- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object SET citus.enable_ddl_propagation TO off; CREATE TEXT SEARCH CONFIGURATION concurrent_index_config ( PARSER = default ); @@ -434,12 +443,12 @@ $$) ORDER BY 1,2; CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = french ); -- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; $$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} - localhost | 57638 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} + localhost | 57637 | t | {manually_created_wrongly,manually_created_wrongly(citus_backup_0)} + localhost | 57638 | t | {manually_created_wrongly,manually_created_wrongly(citus_backup_0)} (2 rows) -- verify the objects get reused appropriately when the specification is the same @@ -458,7 +467,7 @@ CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = f -- now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the -- original one is reused SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; $$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index c7093b2e5..7ee1ffa80 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -27,6 +27,7 @@ test: multi_cluster_management test: non_super_user_object_metadata test: propagate_foreign_servers test: alter_role_propagation +test: role_command_from_any_node test: propagate_extension_commands test: escape_extension_name test: ref_citus_local_fkeys @@ -163,7 +164,8 @@ test: with_executors with_join with_partitioning with_transactions with_dml # Tests around DDL statements run on distributed tables # ---------- test: multi_index_statements -test: multi_alter_table_statements alter_table_add_column +test: multi_alter_table_statements +test: alter_table_add_column test: multi_alter_table_add_constraints test: multi_alter_table_add_constraints_without_name test: multi_alter_table_add_foreign_key_without_name diff --git a/src/test/regress/spec/isolation_get_all_active_transactions.spec b/src/test/regress/spec/isolation_get_all_active_transactions.spec index 497b3a58a..8a2d5a5c6 100644 --- a/src/test/regress/spec/isolation_get_all_active_transactions.spec +++ b/src/test/regress/spec/isolation_get_all_active_transactions.spec @@ -107,6 +107,29 @@ step "s3-show-activity" select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid); } +step "s3-wait-backend-termination" +{ + SET ROLE postgres; + + DO $$ + DECLARE + i int; + BEGIN + i := 0; + + -- try for 5 sec then timeout + WHILE (select 
count(*) > 0 from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid)) + LOOP + PERFORM pg_sleep(0.1); + i := i + 1; + IF i > 50 THEN + RAISE EXCEPTION 'Timeout while waiting for backend to terminate'; + END IF; + END LOOP; + END; + $$; +} + session "s4" step "s4-record-pid" @@ -123,4 +146,4 @@ step "s5-kill" permutation "s1-grant" "s1-begin-insert" "s2-begin-insert" "s3-as-admin" "s3-as-user-1" "s3-as-readonly" "s3-as-monitor" "s1-commit" "s2-commit" -permutation "s4-record-pid" "s3-show-activity" "s5-kill" "s3-show-activity" +permutation "s4-record-pid" "s3-show-activity" "s5-kill" "s3-wait-backend-termination" diff --git a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec index 67c20a2b2..411faf889 100644 --- a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec +++ b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec @@ -22,6 +22,7 @@ setup teardown { + SELECT wait_until_metadata_sync(); DROP FUNCTION trigger_metadata_sync(); DROP TABLE deadlock_detection_test; DROP TABLE t2; diff --git a/src/test/regress/sql/create_role_propagation.sql b/src/test/regress/sql/create_role_propagation.sql index 027e4f72e..fa32cf2d2 100644 --- a/src/test/regress/sql/create_role_propagation.sql +++ b/src/test/regress/sql/create_role_propagation.sql @@ -25,15 +25,10 @@ SELECT master_remove_node('localhost', :worker_2_port); CREATE ROLE create_role_with_everything SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 105 PASSWORD 'strong_password123^' VALID UNTIL '2045-05-05 00:00:00.00+00' IN ROLE create_role, create_group ROLE create_user, create_group_2 ADMIN create_role_2, create_user_2; CREATE ROLE create_role_with_nothing NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 3 PASSWORD 'weakpassword' VALID UNTIL '2015-05-05 00:00:00.00+00'; --- show that creating role from worker node is only allowed when create role --- propagation is off +-- show that creating role from worker node is allowed \c - - - :worker_1_port CREATE ROLE role_on_worker; - -BEGIN; -SET citus.enable_create_role_propagation TO off; -CREATE ROLE role_on_worker; -ROLLBACK; +DROP ROLE role_on_worker; \c - - - :master_port @@ -277,3 +272,5 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; \c - - - :worker_1_port SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; \c - - - :master_port + +DROP ROLE nondist_cascade_1, nondist_cascade_2, nondist_cascade_3, dist_cascade; diff --git a/src/test/regress/sql/distributed_domain.sql b/src/test/regress/sql/distributed_domain.sql index 5bf3bd6a8..0850c99ee 100644 --- a/src/test/regress/sql/distributed_domain.sql +++ b/src/test/regress/sql/distributed_domain.sql @@ -349,10 +349,7 @@ SELECT * FROM use_age_invalid ORDER BY 1; ALTER DOMAIN age_invalid VALIDATE CONSTRAINT check_age_positive; -- test changing the owner of a domain -SET client_min_messages TO error; -SELECT 1 FROM run_command_on_workers($$ CREATE ROLE domain_owner; $$); CREATE ROLE domain_owner; -RESET client_min_messages; CREATE DOMAIN alter_domain_owner AS int; ALTER DOMAIN alter_domain_owner OWNER TO domain_owner; diff --git a/src/test/regress/sql/failure_distributed_results.sql b/src/test/regress/sql/failure_distributed_results.sql index 95e4d5513..93e4a9a33 100644 --- a/src/test/regress/sql/failure_distributed_results.sql +++ b/src/test/regress/sql/failure_distributed_results.sql @@ -15,6 +15,8 @@ 
SET client_min_messages TO WARNING; SELECT citus.mitmproxy('conn.allow()'); SET citus.next_shard_id TO 100800; +-- Needed because of issue #7306 +SET citus.force_max_query_parallelization TO true; -- always try the 1st replica before the 2nd replica. SET citus.task_assignment_policy TO 'first-replica'; diff --git a/src/test/regress/sql/insert_select_connection_leak.sql b/src/test/regress/sql/insert_select_connection_leak.sql index 05afb10a0..e138f6c4d 100644 --- a/src/test/regress/sql/insert_select_connection_leak.sql +++ b/src/test/regress/sql/insert_select_connection_leak.sql @@ -33,12 +33,12 @@ INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; -- ROLLBACK BEGIN; @@ -46,8 +46,8 @@ INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; ROLLBACK; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; \set VERBOSITY TERSE @@ -59,12 +59,12 @@ SELECT worker_connection_count(:worker_1_port) AS worker_1_connections, SAVEPOINT s1; INSERT INTO target_table SELECT a, CASE WHEN a < 50 THEN b ELSE null END FROM source_table; ROLLBACK TO SAVEPOINT s1; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; SET client_min_messages TO WARNING; DROP SCHEMA 
insert_select_connection_leak CASCADE; diff --git a/src/test/regress/sql/logical_replication.sql b/src/test/regress/sql/logical_replication.sql index 3f8e048ca..a85c70b08 100644 --- a/src/test/regress/sql/logical_replication.sql +++ b/src/test/regress/sql/logical_replication.sql @@ -35,17 +35,17 @@ CREATE SUBSCRIPTION citus_shard_move_subscription_:postgres_oid WITH (enabled=false, slot_name=citus_shard_move_slot_:postgres_oid); -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) FROM dist; \c - - - :worker_1_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) FROM dist; \c - - - :master_port @@ -53,11 +53,13 @@ SET search_path TO logical_replication; select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); +SELECT public.wait_for_resource_cleanup(); + -- the subscription is still there, as there is no cleanup record for it -- we have created it manually -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) from dist; \c - - - :worker_1_port @@ -65,9 +67,9 @@ SET search_path TO logical_replication; -- the publication and repslot are still there, as there are no cleanup records for them -- we have created them manually -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) from dist; DROP PUBLICATION citus_shard_move_publication_:postgres_oid; @@ -76,9 +78,9 @@ SELECT pg_drop_replication_slot('citus_shard_move_slot_' || :postgres_oid); \c - - - :worker_2_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) from dist; \c - - - :master_port diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index a4044bab3..642b2f708 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -24,8 +24,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; --- in a distributed transaction and the application name is Citus --- but we are on the coordinator, so still not allowed +-- in a distributed transaction and the application name is Citus, allowed. 
 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
diff --git a/src/test/regress/sql/multi_mx_add_coordinator.sql b/src/test/regress/sql/multi_mx_add_coordinator.sql
index 47053cd28..a7ab2749a 100644
--- a/src/test/regress/sql/multi_mx_add_coordinator.sql
+++ b/src/test/regress/sql/multi_mx_add_coordinator.sql
@@ -41,23 +41,33 @@ CREATE TABLE ref(groupid int);
 SELECT create_reference_table('ref');
 \c - - - :worker_1_port
--- alter role from mx worker isn't allowed when alter role propagation is on
-SET citus.enable_alter_role_propagation TO ON;
-ALTER ROLE reprefuser WITH CREATEROLE;
--- to alter role locally disable alter role propagation first
+-- to alter role locally, disable alter role propagation first
 SET citus.enable_alter_role_propagation TO OFF;
 ALTER ROLE reprefuser WITH CREATEROLE;
-SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser';
-RESET citus.enable_alter_role_propagation;
-\c - - - :worker_2_port
--- show that altering role locally on worker doesn't propagated to other worker
-SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser';
+SELECT result from run_command_on_all_nodes(
+    $$
+    SELECT to_jsonb(q2.*) FROM (
+        SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'
+    ) q2
+    $$
+) ORDER BY result;
+
+-- altering a role from an MX worker is allowed
+SET citus.enable_alter_role_propagation TO ON;
+ALTER ROLE reprefuser WITH CREATEROLE;
+
+-- show that altering a role on a worker is propagated to the coordinator and to the other workers too
+SELECT result from run_command_on_all_nodes(
+    $$
+    SELECT to_jsonb(q2.*) FROM (
+        SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'
+    ) q2
+    $$
+) ORDER BY result;
 \c - - - :master_port
 SET search_path TO mx_add_coordinator,public;
--- show that altering role locally on worker doesn't propagated to coordinator
-SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser';
 SET citus.log_local_commands TO ON;
 SET client_min_messages TO DEBUG;
@@ -67,7 +77,7 @@ SET client_min_messages TO DEBUG;
 SELECT count(*) FROM ref;
 SELECT count(*) FROM ref;
--- test that distributed functions also use local execution
+-- test that distributed functions also use sequential execution
 CREATE OR REPLACE FUNCTION my_group_id()
 RETURNS void
 LANGUAGE plpgsql
@@ -190,5 +200,6 @@ SELECT verify_metadata('localhost', :worker_1_port),
 SET client_min_messages TO error;
 DROP SCHEMA mx_add_coordinator CASCADE;
+DROP USER reprefuser;
 SET search_path TO DEFAULT;
 RESET client_min_messages;
diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql
index de3468415..4fb6eadbb 100644
--- a/src/test/regress/sql/multi_mx_create_table.sql
+++ b/src/test/regress/sql/multi_mx_create_table.sql
@@ -5,9 +5,15 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000;
+SET client_min_messages TO WARNING;
+
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+-- these cannot be dropped at the end of the test file as other tests depend on them; drop any leftovers up front
+DROP SCHEMA IF EXISTS citus_mx_test_schema, citus_mx_test_schema_join_1, citus_mx_test_schema_join_2 CASCADE;
+DROP TABLE IF EXISTS nation_hash, lineitem_mx, orders_mx, customer_mx, nation_mx, part_mx,
supplier_mx, mx_ddl_table, limit_orders_mx, multiple_hash_mx, app_analytics_events_mx, researchers_mx, labs_mx, objects_mx, articles_hash_mx, articles_single_shard_hash_mx, company_employees_mx; + -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; @@ -38,7 +44,7 @@ END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) +CREATE OR REPLACE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; CREATE OPERATOR citus_mx_test_schema.=== ( @@ -67,14 +73,16 @@ SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale); CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); -CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); +CREATE TYPE citus_mx_test_schema.order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 1 \c - - - :worker_1_port +SET client_min_messages TO WARNING; -- show that we do not support creating citus local tables from mx workers for now CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); +DROP TABLE citus_local_table; SET search_path TO citus_mx_test_schema; -- create operator @@ -89,6 +97,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( -- now create required stuff in the worker 2 \c - - - :worker_2_port +SET client_min_messages TO WARNING; SET search_path TO citus_mx_test_schema; @@ -104,6 +113,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( -- connect back to the master, and do some more tests \c - - - :master_port +SET client_min_messages TO WARNING; SET citus.shard_replication_factor TO 1; SET search_path TO public; @@ -308,7 +318,7 @@ CREATE TABLE limit_orders_mx ( symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, - kind order_side_mx NOT NULL, + kind citus_mx_test_schema.order_side_mx NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) ); @@ -386,6 +396,7 @@ FROM citus_tables ORDER BY table_name::text; \c - - - :worker_1_port +SET client_min_messages TO WARNING; SELECT table_name, citus_table_type, distribution_column, shard_count, table_owner FROM citus_tables @@ -394,4 +405,4 @@ ORDER BY table_name::text; SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards ORDER BY shard_name::text; -- Show that altering type name is not supported from worker node -ALTER TYPE order_side_mx RENAME TO temp_order_side_mx; +ALTER TYPE citus_mx_test_schema.order_side_mx RENAME TO temp_order_side_mx; diff --git a/src/test/regress/sql/multi_mx_insert_select_repartition.sql b/src/test/regress/sql/multi_mx_insert_select_repartition.sql index 4a9c8c96f..b206c6e4e 100644 --- a/src/test/regress/sql/multi_mx_insert_select_repartition.sql +++ b/src/test/regress/sql/multi_mx_insert_select_repartition.sql @@ -55,6 +55,8 @@ SET citus.log_local_commands to on; -- INSERT .. SELECT via repartitioning with local execution BEGIN; select count(*) from source_table WHERE a = 1; + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; ROLLBACK; diff --git a/src/test/regress/sql/multi_mx_node_metadata.sql b/src/test/regress/sql/multi_mx_node_metadata.sql index 45b4edae1..e0d765a20 100644 --- a/src/test/regress/sql/multi_mx_node_metadata.sql +++ b/src/test/regress/sql/multi_mx_node_metadata.sql @@ -14,7 +14,7 @@ SET citus.shard_replication_factor TO 1; \set VERBOSITY terse -- Simulates a readonly node by setting default_transaction_read_only. -CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) +CREATE OR REPLACE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) RETURNS TEXT LANGUAGE sql AS $$ @@ -35,7 +35,7 @@ CREATE OR REPLACE FUNCTION raise_error_in_metadata_sync() LANGUAGE C STRICT AS 'citus'; -CREATE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ +CREATE OR REPLACE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ declare counter integer := -1; begin @@ -378,7 +378,22 @@ SELECT trigger_metadata_sync(); SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; -DROP DATABASE db_to_drop; +DO $$ +DECLARE + i int := 0; +BEGIN + WHILE NOT (SELECT bool_and(success) from run_command_on_all_nodes('DROP DATABASE IF EXISTS db_to_drop')) + LOOP + BEGIN + i := i + 1; + IF i > 5 THEN + RAISE EXCEPTION 'DROP DATABASE timed out'; + END IF; + PERFORM pg_sleep(1); + END; + END LOOP; +END; +$$; SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; diff --git a/src/test/regress/sql/multi_utilities.sql b/src/test/regress/sql/multi_utilities.sql index 9a14ab590..1124b9890 100644 --- a/src/test/regress/sql/multi_utilities.sql +++ b/src/test/regress/sql/multi_utilities.sql @@ -229,6 +229,8 @@ VACUUM; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM local_vacuum_table; +VACUUM local_vacuum_table; +VACUUM local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 25000000 THEN 22500000 ELSE s END FROM pg_total_relation_size('local_vacuum_table') s ; @@ -257,12 +259,16 @@ VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 50000000 AND 70000000 THEN 60000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; @@ -270,11 +276,15 @@ FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset insert into local_vacuum_table 
select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset SELECT :size1 > :size2 as truncate_less_size; diff --git a/src/test/regress/sql/role_command_from_any_node.sql b/src/test/regress/sql/role_command_from_any_node.sql new file mode 100644 index 000000000..0fd574716 --- /dev/null +++ b/src/test/regress/sql/role_command_from_any_node.sql @@ -0,0 +1,174 @@ +-- idempotently remove the coordinator from metadata +SELECT COUNT(citus_remove_node(nodename, nodeport)) >= 0 FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :master_port; + +-- make sure that CREATE ROLE from workers is not supported when coordinator is not added to metadata +SELECT result FROM run_command_on_workers('CREATE ROLE test_role'); + +\c - - - :master_port + +CREATE SCHEMA role_command_from_any_node; +SET search_path TO role_command_from_any_node; + +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); + +CREATE OR REPLACE FUNCTION check_role_on_all_nodes(p_role_name text) +RETURNS TABLE (node_type text, result text) +AS $func$ +DECLARE + v_worker_query text; +BEGIN + v_worker_query := format( + $$ + SELECT to_jsonb(q1.*) FROM ( + SELECT + ( + SELECT COUNT(*) = 1 FROM pg_roles WHERE rolname = '%s' + ) AS role_exists, + ( + SELECT to_jsonb(q.*) FROM (SELECT * FROM pg_roles WHERE rolname = '%s') q + ) AS role_properties, + ( + SELECT COUNT(*) = 1 + FROM pg_dist_object + WHERE objid = (SELECT oid FROM pg_roles WHERE rolname = '%s') + ) AS pg_dist_object_record_for_role_exists, + ( + SELECT COUNT(*) > 0 + FROM pg_dist_object + WHERE classid = 1260 AND objid NOT IN (SELECT oid FROM pg_roles) + ) AS stale_pg_dist_object_record_for_a_role_exists + ) q1 + $$, + p_role_name, p_role_name, p_role_name + ); + + RETURN QUERY + SELECT + CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)' + WHEN (groupid = 0) THEN 'coordinator (remote)' + WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)' + ELSE 'worker node (remote)' + END AS node_type, + q2.result + FROM run_command_on_all_nodes(v_worker_query) q2 + JOIN pg_dist_node USING (nodeid); +END; +$func$ LANGUAGE plpgsql; + +\c - - - :worker_1_port + +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; + +SET citus.enable_create_role_propagation TO OFF; + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +SET citus.enable_create_role_propagation TO ON; + +-- doesn't fail even if the role doesn't exist on other nodes +DROP ROLE test_role; + +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as 
result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +CREATE ROLE test_role; + +SET citus.enable_alter_role_propagation TO OFF; + +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +ALTER ROLE test_role_renamed RENAME TO test_role; + +SET citus.enable_alter_role_propagation TO ON; + +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE test_role_renamed CREATEDB; +SET citus.enable_alter_role_propagation TO ON; + +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +ALTER ROLE test_role_renamed CREATEDB; +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +SET citus.enable_alter_role_set_propagation TO ON; + +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; + +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + +SET citus.enable_alter_role_set_propagation TO OFF; + +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO ON; + +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + +SET citus.enable_alter_role_set_propagation TO ON; + +ALTER ROLE current_user IN DATABASE "regression" RESET enable_hashjoin; + +CREATE ROLE another_user; + +SET citus.enable_create_role_propagation TO OFF; + +GRANT another_user TO test_role_renamed; + +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + +SET citus.enable_create_role_propagation TO ON; + +SET client_min_messages TO ERROR; +GRANT another_user TO test_role_renamed; +SET client_min_messages TO NOTICE; + +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + +\c - - - :master_port + +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; + +SELECT citus_remove_node('localhost', :worker_1_port); +SELECT 1 FROM citus_add_node('localhost', :worker_1_port); + +-- make sure that citus_add_node() propagates the roles created via a worker +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +SELECT citus_remove_node('localhost', :master_port); + +\c - - - :worker_1_port + +-- they fail because the coordinator is not added to metadata +DROP ROLE test_role_renamed; +ALTER ROLE test_role_renamed RENAME TO test_role; +ALTER ROLE test_role_renamed CREATEDB; +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; +GRANT another_user TO test_role_renamed; + +\c - - - :master_port + +DROP ROLE test_role_renamed, another_user; + +SET client_min_messages TO WARNING; +DROP SCHEMA role_command_from_any_node CASCADE; diff --git a/src/test/regress/sql/text_search.sql b/src/test/regress/sql/text_search.sql index d0d4b5a6f..4a65a5e1a 100644 --- a/src/test/regress/sql/text_search.sql +++ 
b/src/test/regress/sql/text_search.sql @@ -199,9 +199,9 @@ SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; -- verify they are all removed locally -SELECT 'text_search.config1'::regconfig; -SELECT 'text_search.config2'::regconfig; -SELECT 'text_search.config3'::regconfig; +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config1' AND cfgnamespace = 'text_search'::regnamespace; +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config2' AND cfgnamespace = 'text_search'::regnamespace; +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config3' AND cfgnamespace = 'text_search'::regnamespace; -- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object SET citus.enable_ddl_propagation TO off; @@ -235,7 +235,7 @@ CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = f -- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; $$) ORDER BY 1,2; -- verify the objects get reused appropriately when the specification is the same @@ -249,7 +249,7 @@ CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = f -- now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the -- original one is reused SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; $$) ORDER BY 1,2; CREATE SCHEMA "Text Search Requiring Quote's";
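
Editor's note on the array_agg() hunks above: without an ORDER BY inside the aggregate, PostgreSQL assembles the array in whatever order the scan returns rows, so the workers' expected output could legitimately flip between runs; ordering inside the aggregate is what makes the regression output deterministic. A minimal sketch of the difference, using a hypothetical throwaway table rather than pg_ts_config:

-- hypothetical illustration only, not part of the patch
CREATE TEMP TABLE cfg_names(cfgname text);
INSERT INTO cfg_names VALUES ('manually_created_wrongly(citus_backup_0)'), ('manually_created_wrongly');

SELECT array_agg(cfgname) FROM cfg_names;                  -- element order is unspecified
SELECT array_agg(cfgname ORDER BY cfgname) FROM cfg_names; -- always sorted; stable expected output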
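
Similarly, the s3-wait-backend-termination isolation step and the DROP DATABASE retry loop in multi_mx_node_metadata.sql deflake their tests with the same bounded-wait idiom: poll a condition, sleep briefly, and raise after a fixed number of iterations so a hang surfaces as a clear error rather than a silent test timeout. A self-contained sketch of the idiom; the polled predicate here (other active backends) is only a stand-in for the real checks against get_all_active_transactions() and run_command_on_all_nodes():

DO $$
DECLARE
    i int := 0;
BEGIN
    -- poll every 100 ms; give up after 50 tries (~5 seconds)
    -- the WHERE clause is a placeholder condition, not the one used by the tests
    WHILE EXISTS (
        SELECT 1 FROM pg_stat_activity
        WHERE state = 'active' AND pid <> pg_backend_pid()
    )
    LOOP
        PERFORM pg_sleep(0.1);
        i := i + 1;
        IF i > 50 THEN
            RAISE EXCEPTION 'timed out waiting for condition';
        END IF;
    END LOOP;
END;
$$;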