From 65cc1c9aad202990b4e72aff15369142c55b2154 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Wed, 26 Jun 2024 18:02:52 +0200 Subject: [PATCH 01/35] fix: improve error handling for tracing settings --- poetry.lock | 218 ++++++++++++++++----------------- pyinfra/utils/opentelemetry.py | 11 +- 2 files changed, 116 insertions(+), 113 deletions(-) diff --git a/poetry.lock b/poetry.lock index 62eb91f..8b9f874 100644 --- a/poetry.lock +++ b/poetry.lock @@ -249,13 +249,13 @@ opentelemetry-sdk = ">=1.25,<2.0" [[package]] name = "azure-monitor-opentelemetry-exporter" -version = "1.0.0b26" +version = "1.0.0b27" description = "Microsoft Azure Monitor Opentelemetry Exporter Client Library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "azure-monitor-opentelemetry-exporter-1.0.0b26.tar.gz", hash = "sha256:ddde89178186d9be724768e3d3b322c21fc315d8a48efc3c27889ed1accbac05"}, - {file = "azure_monitor_opentelemetry_exporter-1.0.0b26-py2.py3-none-any.whl", hash = "sha256:295e7dcba52936f549f4568f2ce37583c2d6e0dbb06d7c317e91f6e7c47a734b"}, + {file = "azure-monitor-opentelemetry-exporter-1.0.0b27.tar.gz", hash = "sha256:ee5eb0bb37c29da800cc479084f42181a98d7ad192a27a9b2fdd9cb9957320ad"}, + {file = "azure_monitor_opentelemetry_exporter-1.0.0b27-py2.py3-none-any.whl", hash = "sha256:92f222e11415c6606588be0166b02ba4970159c6bf016160a2023b3713db9f31"}, ] [package.dependencies] @@ -583,63 +583,63 @@ test = ["pytest"] [[package]] name = "coverage" -version = "7.5.3" +version = "7.5.4" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45"}, - {file = "coverage-7.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec"}, - {file = 
"coverage-7.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286"}, - {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc"}, - {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d"}, - {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83"}, - {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d"}, - {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c"}, - {file = "coverage-7.5.3-cp310-cp310-win32.whl", hash = "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84"}, - {file = "coverage-7.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac"}, - {file = "coverage-7.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974"}, - {file = "coverage-7.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232"}, - {file = "coverage-7.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd"}, - {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807"}, - {file = 
"coverage-7.5.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb"}, - {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc"}, - {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8"}, - {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614"}, - {file = "coverage-7.5.3-cp311-cp311-win32.whl", hash = "sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9"}, - {file = "coverage-7.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a"}, - {file = "coverage-7.5.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8"}, - {file = "coverage-7.5.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3"}, - {file = "coverage-7.5.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1"}, - {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db"}, - {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd"}, - {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523"}, - {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_i686.whl", 
hash = "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35"}, - {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84"}, - {file = "coverage-7.5.3-cp312-cp312-win32.whl", hash = "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08"}, - {file = "coverage-7.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb"}, - {file = "coverage-7.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb"}, - {file = "coverage-7.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155"}, - {file = "coverage-7.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24"}, - {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98"}, - {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d"}, - {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d"}, - {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce"}, - {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0"}, - {file = "coverage-7.5.3-cp38-cp38-win32.whl", hash = "sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485"}, - {file = "coverage-7.5.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56"}, - {file = "coverage-7.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85"}, - {file = "coverage-7.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31"}, - {file = "coverage-7.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d"}, - {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341"}, - {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7"}, - {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52"}, - {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303"}, - {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd"}, - {file = "coverage-7.5.3-cp39-cp39-win32.whl", hash = "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d"}, - {file = "coverage-7.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0"}, - {file = "coverage-7.5.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884"}, - {file = "coverage-7.5.3.tar.gz", hash = "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f"}, + {file = "coverage-7.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:6cfb5a4f556bb51aba274588200a46e4dd6b505fb1a5f8c5ae408222eb416f99"}, + {file = "coverage-7.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2174e7c23e0a454ffe12267a10732c273243b4f2d50d07544a91198f05c48f47"}, + {file = "coverage-7.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2214ee920787d85db1b6a0bd9da5f8503ccc8fcd5814d90796c2f2493a2f4d2e"}, + {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1137f46adb28e3813dec8c01fefadcb8c614f33576f672962e323b5128d9a68d"}, + {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b385d49609f8e9efc885790a5a0e89f2e3ae042cdf12958b6034cc442de428d3"}, + {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b4a474f799456e0eb46d78ab07303286a84a3140e9700b9e154cfebc8f527016"}, + {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5cd64adedf3be66f8ccee418473c2916492d53cbafbfcff851cbec5a8454b136"}, + {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e564c2cf45d2f44a9da56f4e3a26b2236504a496eb4cb0ca7221cd4cc7a9aca9"}, + {file = "coverage-7.5.4-cp310-cp310-win32.whl", hash = "sha256:7076b4b3a5f6d2b5d7f1185fde25b1e54eb66e647a1dfef0e2c2bfaf9b4c88c8"}, + {file = "coverage-7.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:018a12985185038a5b2bcafab04ab833a9a0f2c59995b3cec07e10074c78635f"}, + {file = "coverage-7.5.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:db14f552ac38f10758ad14dd7b983dbab424e731588d300c7db25b6f89e335b5"}, + {file = "coverage-7.5.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3257fdd8e574805f27bb5342b77bc65578e98cbc004a92232106344053f319ba"}, + {file = "coverage-7.5.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a6612c99081d8d6134005b1354191e103ec9705d7ba2754e848211ac8cacc6b"}, + {file = 
"coverage-7.5.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d45d3cbd94159c468b9b8c5a556e3f6b81a8d1af2a92b77320e887c3e7a5d080"}, + {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed550e7442f278af76d9d65af48069f1fb84c9f745ae249c1a183c1e9d1b025c"}, + {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a892be37ca35eb5019ec85402c3371b0f7cda5ab5056023a7f13da0961e60da"}, + {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8192794d120167e2a64721d88dbd688584675e86e15d0569599257566dec9bf0"}, + {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:820bc841faa502e727a48311948e0461132a9c8baa42f6b2b84a29ced24cc078"}, + {file = "coverage-7.5.4-cp311-cp311-win32.whl", hash = "sha256:6aae5cce399a0f065da65c7bb1e8abd5c7a3043da9dceb429ebe1b289bc07806"}, + {file = "coverage-7.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:d2e344d6adc8ef81c5a233d3a57b3c7d5181f40e79e05e1c143da143ccb6377d"}, + {file = "coverage-7.5.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:54317c2b806354cbb2dc7ac27e2b93f97096912cc16b18289c5d4e44fc663233"}, + {file = "coverage-7.5.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:042183de01f8b6d531e10c197f7f0315a61e8d805ab29c5f7b51a01d62782747"}, + {file = "coverage-7.5.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6bb74ed465d5fb204b2ec41d79bcd28afccf817de721e8a807d5141c3426638"}, + {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3d45ff86efb129c599a3b287ae2e44c1e281ae0f9a9bad0edc202179bcc3a2e"}, + {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5013ed890dc917cef2c9f765c4c6a8ae9df983cd60dbb635df8ed9f4ebc9f555"}, + {file = 
"coverage-7.5.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1014fbf665fef86cdfd6cb5b7371496ce35e4d2a00cda501cf9f5b9e6fced69f"}, + {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3684bc2ff328f935981847082ba4fdc950d58906a40eafa93510d1b54c08a66c"}, + {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:581ea96f92bf71a5ec0974001f900db495488434a6928a2ca7f01eee20c23805"}, + {file = "coverage-7.5.4-cp312-cp312-win32.whl", hash = "sha256:73ca8fbc5bc622e54627314c1a6f1dfdd8db69788f3443e752c215f29fa87a0b"}, + {file = "coverage-7.5.4-cp312-cp312-win_amd64.whl", hash = "sha256:cef4649ec906ea7ea5e9e796e68b987f83fa9a718514fe147f538cfeda76d7a7"}, + {file = "coverage-7.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdd31315fc20868c194130de9ee6bfd99755cc9565edff98ecc12585b90be882"}, + {file = "coverage-7.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:02ff6e898197cc1e9fa375581382b72498eb2e6d5fc0b53f03e496cfee3fac6d"}, + {file = "coverage-7.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d05c16cf4b4c2fc880cb12ba4c9b526e9e5d5bb1d81313d4d732a5b9fe2b9d53"}, + {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5986ee7ea0795a4095ac4d113cbb3448601efca7f158ec7f7087a6c705304e4"}, + {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df54843b88901fdc2f598ac06737f03d71168fd1175728054c8f5a2739ac3e4"}, + {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ab73b35e8d109bffbda9a3e91c64e29fe26e03e49addf5b43d85fc426dde11f9"}, + {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:aea072a941b033813f5e4814541fc265a5c12ed9720daef11ca516aeacd3bd7f"}, + {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:16852febd96acd953b0d55fc842ce2dac1710f26729b31c80b940b9afcd9896f"}, + {file = "coverage-7.5.4-cp38-cp38-win32.whl", hash = "sha256:8f894208794b164e6bd4bba61fc98bf6b06be4d390cf2daacfa6eca0a6d2bb4f"}, + {file = "coverage-7.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:e2afe743289273209c992075a5a4913e8d007d569a406ffed0bd080ea02b0633"}, + {file = "coverage-7.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b95c3a8cb0463ba9f77383d0fa8c9194cf91f64445a63fc26fb2327e1e1eb088"}, + {file = "coverage-7.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d7564cc09dd91b5a6001754a5b3c6ecc4aba6323baf33a12bd751036c998be4"}, + {file = "coverage-7.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44da56a2589b684813f86d07597fdf8a9c6ce77f58976727329272f5a01f99f7"}, + {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e16f3d6b491c48c5ae726308e6ab1e18ee830b4cdd6913f2d7f77354b33f91c8"}, + {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbc5958cb471e5a5af41b0ddaea96a37e74ed289535e8deca404811f6cb0bc3d"}, + {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a04e990a2a41740b02d6182b498ee9796cf60eefe40cf859b016650147908029"}, + {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ddbd2f9713a79e8e7242d7c51f1929611e991d855f414ca9996c20e44a895f7c"}, + {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b1ccf5e728ccf83acd313c89f07c22d70d6c375a9c6f339233dcf792094bcbf7"}, + {file = "coverage-7.5.4-cp39-cp39-win32.whl", hash = "sha256:56b4eafa21c6c175b3ede004ca12c653a88b6f922494b023aeb1e836df953ace"}, + {file = "coverage-7.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:65e528e2e921ba8fd67d9055e6b9f9e34b21ebd6768ae1c1723f4ea6ace1234d"}, + {file = "coverage-7.5.4-pp38.pp39.pp310-none-any.whl", hash = 
"sha256:79b356f3dd5b26f3ad23b35c75dbdaf1f9e2450b6bcefc6d0825ea0aa3f86ca5"}, + {file = "coverage-7.5.4.tar.gz", hash = "sha256:a44963520b069e12789d0faea4e9fdb1e410cdc4aab89d94f7f55cbb7fef0353"}, ] [package.extras] @@ -720,13 +720,13 @@ tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""} [[package]] name = "cyclonedx-python-lib" -version = "7.4.0" +version = "7.4.1" description = "Python library for CycloneDX" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "cyclonedx_python_lib-7.4.0-py3-none-any.whl", hash = "sha256:fc423e7f46d772e5ded29a48cb0743233e692e5853c49b829efc0f59014efde1"}, - {file = "cyclonedx_python_lib-7.4.0.tar.gz", hash = "sha256:09b10736a7f440262578fa40f470b448de1ebf3c7a71e2ff0a4af0781d3a3b42"}, + {file = "cyclonedx_python_lib-7.4.1-py3-none-any.whl", hash = "sha256:73bf8d5c09ad10698c75d3ce3f123c84c9aff3959d67b8b5ca9e5a7c5da43abe"}, + {file = "cyclonedx_python_lib-7.4.1.tar.gz", hash = "sha256:23bf8196e008bb8e06c1040ad2ab69492891d8a581cb2aefa36a77f199790a37"}, ] [package.dependencies] @@ -744,33 +744,33 @@ xml-validation = ["lxml (>=4,<6)"] [[package]] name = "debugpy" -version = "1.8.1" +version = "1.8.2" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.1-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:3bda0f1e943d386cc7a0e71bfa59f4137909e2ed947fb3946c506e113000f741"}, - {file = "debugpy-1.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dda73bf69ea479c8577a0448f8c707691152e6c4de7f0c4dec5a4bc11dee516e"}, - {file = "debugpy-1.8.1-cp310-cp310-win32.whl", hash = "sha256:3a79c6f62adef994b2dbe9fc2cc9cc3864a23575b6e387339ab739873bea53d0"}, - {file = "debugpy-1.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:7eb7bd2b56ea3bedb009616d9e2f64aab8fc7000d481faec3cd26c98a964bcdd"}, - {file = "debugpy-1.8.1-cp311-cp311-macosx_11_0_universal2.whl", hash = 
"sha256:016a9fcfc2c6b57f939673c874310d8581d51a0fe0858e7fac4e240c5eb743cb"}, - {file = "debugpy-1.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd97ed11a4c7f6d042d320ce03d83b20c3fb40da892f994bc041bbc415d7a099"}, - {file = "debugpy-1.8.1-cp311-cp311-win32.whl", hash = "sha256:0de56aba8249c28a300bdb0672a9b94785074eb82eb672db66c8144fff673146"}, - {file = "debugpy-1.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:1a9fe0829c2b854757b4fd0a338d93bc17249a3bf69ecf765c61d4c522bb92a8"}, - {file = "debugpy-1.8.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3ebb70ba1a6524d19fa7bb122f44b74170c447d5746a503e36adc244a20ac539"}, - {file = "debugpy-1.8.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2e658a9630f27534e63922ebf655a6ab60c370f4d2fc5c02a5b19baf4410ace"}, - {file = "debugpy-1.8.1-cp312-cp312-win32.whl", hash = "sha256:caad2846e21188797a1f17fc09c31b84c7c3c23baf2516fed5b40b378515bbf0"}, - {file = "debugpy-1.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:edcc9f58ec0fd121a25bc950d4578df47428d72e1a0d66c07403b04eb93bcf98"}, - {file = "debugpy-1.8.1-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:7a3afa222f6fd3d9dfecd52729bc2e12c93e22a7491405a0ecbf9e1d32d45b39"}, - {file = "debugpy-1.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d915a18f0597ef685e88bb35e5d7ab968964b7befefe1aaea1eb5b2640b586c7"}, - {file = "debugpy-1.8.1-cp38-cp38-win32.whl", hash = "sha256:92116039b5500633cc8d44ecc187abe2dfa9b90f7a82bbf81d079fcdd506bae9"}, - {file = "debugpy-1.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:e38beb7992b5afd9d5244e96ad5fa9135e94993b0c551ceebf3fe1a5d9beb234"}, - {file = "debugpy-1.8.1-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:bfb20cb57486c8e4793d41996652e5a6a885b4d9175dd369045dad59eaacea42"}, - {file = "debugpy-1.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:efd3fdd3f67a7e576dd869c184c5dd71d9aaa36ded271939da352880c012e703"}, - {file = "debugpy-1.8.1-cp39-cp39-win32.whl", hash = "sha256:58911e8521ca0c785ac7a0539f1e77e0ce2df753f786188f382229278b4cdf23"}, - {file = "debugpy-1.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:6df9aa9599eb05ca179fb0b810282255202a66835c6efb1d112d21ecb830ddd3"}, - {file = "debugpy-1.8.1-py2.py3-none-any.whl", hash = "sha256:28acbe2241222b87e255260c76741e1fbf04fdc3b6d094fcf57b6c6f75ce1242"}, - {file = "debugpy-1.8.1.zip", hash = "sha256:f696d6be15be87aef621917585f9bb94b1dc9e8aced570db1b8a6fc14e8f9b42"}, + {file = "debugpy-1.8.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:7ee2e1afbf44b138c005e4380097d92532e1001580853a7cb40ed84e0ef1c3d2"}, + {file = "debugpy-1.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f8c3f7c53130a070f0fc845a0f2cee8ed88d220d6b04595897b66605df1edd6"}, + {file = "debugpy-1.8.2-cp310-cp310-win32.whl", hash = "sha256:f179af1e1bd4c88b0b9f0fa153569b24f6b6f3de33f94703336363ae62f4bf47"}, + {file = "debugpy-1.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:0600faef1d0b8d0e85c816b8bb0cb90ed94fc611f308d5fde28cb8b3d2ff0fe3"}, + {file = "debugpy-1.8.2-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:8a13417ccd5978a642e91fb79b871baded925d4fadd4dfafec1928196292aa0a"}, + {file = "debugpy-1.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acdf39855f65c48ac9667b2801234fc64d46778021efac2de7e50907ab90c634"}, + {file = "debugpy-1.8.2-cp311-cp311-win32.whl", hash = "sha256:2cbd4d9a2fc5e7f583ff9bf11f3b7d78dfda8401e8bb6856ad1ed190be4281ad"}, + {file = "debugpy-1.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:d3408fddd76414034c02880e891ea434e9a9cf3a69842098ef92f6e809d09afa"}, + {file = "debugpy-1.8.2-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:5d3ccd39e4021f2eb86b8d748a96c766058b39443c1f18b2dc52c10ac2757835"}, + {file = 
"debugpy-1.8.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62658aefe289598680193ff655ff3940e2a601765259b123dc7f89c0239b8cd3"}, + {file = "debugpy-1.8.2-cp312-cp312-win32.whl", hash = "sha256:bd11fe35d6fd3431f1546d94121322c0ac572e1bfb1f6be0e9b8655fb4ea941e"}, + {file = "debugpy-1.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:15bc2f4b0f5e99bf86c162c91a74c0631dbd9cef3c6a1d1329c946586255e859"}, + {file = "debugpy-1.8.2-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:5a019d4574afedc6ead1daa22736c530712465c0c4cd44f820d803d937531b2d"}, + {file = "debugpy-1.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40f062d6877d2e45b112c0bbade9a17aac507445fd638922b1a5434df34aed02"}, + {file = "debugpy-1.8.2-cp38-cp38-win32.whl", hash = "sha256:c78ba1680f1015c0ca7115671fe347b28b446081dada3fedf54138f44e4ba031"}, + {file = "debugpy-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:cf327316ae0c0e7dd81eb92d24ba8b5e88bb4d1b585b5c0d32929274a66a5210"}, + {file = "debugpy-1.8.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:1523bc551e28e15147815d1397afc150ac99dbd3a8e64641d53425dba57b0ff9"}, + {file = "debugpy-1.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e24ccb0cd6f8bfaec68d577cb49e9c680621c336f347479b3fce060ba7c09ec1"}, + {file = "debugpy-1.8.2-cp39-cp39-win32.whl", hash = "sha256:7f8d57a98c5a486c5c7824bc0b9f2f11189d08d73635c326abef268f83950326"}, + {file = "debugpy-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:16c8dcab02617b75697a0a925a62943e26a0330da076e2a10437edd9f0bf3755"}, + {file = "debugpy-1.8.2-py2.py3-none-any.whl", hash = "sha256:16e16df3a98a35c63c3ab1e4d19be4cbc7fdda92d9ddc059294f18910928e0ca"}, + {file = "debugpy-1.8.2.zip", hash = "sha256:95378ed08ed2089221896b9b3a8d021e642c24edc8fef20e5d4342ca8be65c00"}, ] [[package]] @@ -908,18 +908,18 @@ all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)" [[package]] name = 
"filelock" -version = "3.14.0" +version = "3.15.4" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.14.0-py3-none-any.whl", hash = "sha256:43339835842f110ca7ae60f1e1c160714c5a6afd15a2873419ab185334975c0f"}, - {file = "filelock-3.14.0.tar.gz", hash = "sha256:6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a"}, + {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, + {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] typing = ["typing-extensions (>=4.8)"] [[package]] @@ -958,17 +958,17 @@ files = [ [[package]] name = "googleapis-common-protos" -version = "1.63.1" +version = "1.63.2" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" files = [ - {file = "googleapis-common-protos-1.63.1.tar.gz", hash = "sha256:c6442f7a0a6b2a80369457d79e6672bb7dcbaab88e0848302497e3ec80780a6a"}, - {file = "googleapis_common_protos-1.63.1-py2.py3-none-any.whl", hash = "sha256:0e1c2cdfcbc354b76e4a211a35ea35d6926a835cba1377073c4861db904a1877"}, + {file = "googleapis-common-protos-1.63.2.tar.gz", hash = "sha256:27c5abdffc4911f28101e635de1533fb4cfd2c37fbaa9174587c799fac90aa87"}, + {file = "googleapis_common_protos-1.63.2-py2.py3-none-any.whl", hash = "sha256:27a2499c7e8aff199665b22741997e485eccc8645aa9176c7c988e6fae507945"}, 
] [package.dependencies] -protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" [package.extras] grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] @@ -2042,13 +2042,13 @@ files = [ [[package]] name = "packageurl-python" -version = "0.15.0" +version = "0.15.1" description = "A purl aka. Package URL parser and builder" optional = false python-versions = ">=3.7" files = [ - {file = "packageurl-python-0.15.0.tar.gz", hash = "sha256:f219b2ce6348185a27bd6a72e6fdc9f984e6c9fa157effa7cb93e341c49cdcc2"}, - {file = "packageurl_python-0.15.0-py3-none-any.whl", hash = "sha256:cdc6bd42dc30c4fc7f8f0ccb721fc31f8c33985dbffccb6e6be4c72874de48ca"}, + {file = "packageurl_python-0.15.1-py3-none-any.whl", hash = "sha256:f7a44ddb9caaf6197b3b62b890ed0be5cb15e962accab2a51db36846d5174562"}, + {file = "packageurl_python-0.15.1.tar.gz", hash = "sha256:9a37b9a7cad9a2872b4612151ba3749fd9dec90485577c14d374b6e66b7edf03"}, ] [package.extras] @@ -2372,13 +2372,13 @@ files = [ [[package]] name = "pydantic" -version = "2.7.3" +version = "2.7.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.3-py3-none-any.whl", hash = "sha256:ea91b002777bf643bb20dd717c028ec43216b24a6001a280f83877fd2655d0b4"}, - {file = "pydantic-2.7.3.tar.gz", hash = "sha256:c46c76a40bb1296728d7a8b99aa73dd70a48c3510111ff290034f860c99c419e"}, + {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, + {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, ] [package.dependencies] @@ -2496,13 +2496,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pylint" -version = "3.2.3" 
+version = "3.2.4" description = "python code static checker" optional = false python-versions = ">=3.8.0" files = [ - {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, - {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, + {file = "pylint-3.2.4-py3-none-any.whl", hash = "sha256:43b8ffdf1578e4e4439fa1f6ace402281f5dd61999192280fa12fe411bef2999"}, + {file = "pylint-3.2.4.tar.gz", hash = "sha256:5753d27e49a658b12a48c2883452751a2ecfc7f38594e0980beb03a6e77e6f86"}, ] [package.dependencies] @@ -2970,18 +2970,18 @@ files = [ [[package]] name = "setuptools" -version = "70.0.0" +version = "70.1.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, - {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, + {file = "setuptools-70.1.1-py3-none-any.whl", hash = "sha256:a58a8fde0541dab0419750bcc521fbdf8585f6e5cb41909df3a472ef7b81ca95"}, + {file = "setuptools-70.1.1.tar.gz", hash = "sha256:937a48c7cdb7a21eb53cd7f9b59e525503aa8abaf3584c730dc5f7a5bec3a650"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest 
(>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "six" @@ -3147,13 +3147,13 @@ dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake [[package]] name = "urllib3" -version = "2.2.1" +version = "2.2.2" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] @@ -3183,13 +3183,13 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", [[package]] name = "virtualenv" -version = "20.26.2" +version = "20.26.3" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.26.2-py3-none-any.whl", hash = "sha256:a624db5e94f01ad993d476b9ee5346fdf7b9de43ccaee0e0197012dc838a0e9b"}, - {file = "virtualenv-20.26.2.tar.gz", hash = "sha256:82bf0f4eebbb78d36ddaee0283d43fe5736b53880b8a8cdcd37390a07ac3741c"}, + {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, + {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, ] [package.dependencies] diff --git a/pyinfra/utils/opentelemetry.py b/pyinfra/utils/opentelemetry.py index 7a05233..66611ae 100644 --- a/pyinfra/utils/opentelemetry.py +++ b/pyinfra/utils/opentelemetry.py @@ -39,13 +39,16 @@ def setup_trace(settings: Dynaconf, service_name: str = None, exporter: SpanExpo if tracing_type == "azure_monitor": # Configure OpenTelemetry to use Azure Monitor with the # APPLICATIONINSIGHTS_CONNECTION_STRING environment variable. 
- logger.info("Azure Monitor tracing enabled.") - configure_azure_monitor() + try: + configure_azure_monitor() + logger.info("Azure Monitor tracing enabled.") + except Exception as exception: + logger.warning(f"Azure Monitor tracing could not be enabled: {exception}") elif tracing_type == "opentelemetry": - logger.info("OpenTelemetry tracing enabled.") configure_opentelemtry_tracing(settings, service_name, exporter) + logger.info("OpenTelemetry tracing enabled.") else: - raise Exception(f"Unknown tracing type: {tracing_type}") + logger.warning(f"Unknown tracing type: {tracing_type}. Tracing could not be enabled.") def configure_opentelemtry_tracing(settings: Dynaconf, service_name: str = None, exporter: SpanExporter = None): From 3532f949a99ed8aebbd856d77f386da237392e45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Wed, 26 Jun 2024 18:15:51 +0200 Subject: [PATCH 02/35] refactor: remove second trace setup --- pyinfra/webserver/utils.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/pyinfra/webserver/utils.py b/pyinfra/webserver/utils.py index 8ca4e1d..710c26a 100644 --- a/pyinfra/webserver/utils.py +++ b/pyinfra/webserver/utils.py @@ -8,15 +8,10 @@ from fastapi import FastAPI from pyinfra.config.loader import validate_settings from pyinfra.config.validators import webserver_validators -from pyinfra.utils.opentelemetry import instrument_app, setup_trace def create_webserver_thread_from_settings(app: FastAPI, settings: Dynaconf) -> threading.Thread: validate_settings(settings, validators=webserver_validators) - - if settings.tracing.enabled: - return create_webserver_thread_with_tracing(app, settings) - return create_webserver_thread(app=app, port=settings.webserver.port, host=settings.webserver.host) @@ -29,18 +24,6 @@ def create_webserver_thread(app: FastAPI, port: int, host: str) -> threading.Thr return thread -def create_webserver_thread_with_tracing(app: FastAPI, settings: Dynaconf) -> threading.Thread: - def 
inner(): - setup_trace(settings) - instrument_app(app) - uvicorn.run(app, port=settings.webserver.port, host=settings.webserver.host, log_level=logging.WARNING) - - thread = threading.Thread(target=inner) - thread.daemon = True - - return thread - - HealthFunction = Callable[[], bool] From 6fabe1ae8cf28a753fe826784aa4ab1142446191 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Fri, 28 Jun 2024 15:41:53 +0200 Subject: [PATCH 03/35] feat: wip for multiple tenants --- pyinfra/queue/multiple_tenants.py | 226 ++++++++++++++++++++++++++++++ 1 file changed, 226 insertions(+) create mode 100644 pyinfra/queue/multiple_tenants.py diff --git a/pyinfra/queue/multiple_tenants.py b/pyinfra/queue/multiple_tenants.py new file mode 100644 index 0000000..5d0b948 --- /dev/null +++ b/pyinfra/queue/multiple_tenants.py @@ -0,0 +1,226 @@ +import atexit +import pika +import os +import json +import logging +import signal +import sys +from threading import Thread +from dynaconf import Dynaconf +from typing import Callable, Union +from kn_utils.logging import logger +from pika.adapters.blocking_connection import BlockingChannel, BlockingConnection +from pika.channel import Channel +from retry import retry + +from pyinfra.config.loader import validate_settings +from pyinfra.config.validators import queue_manager_validators + + +pika_logger = logging.getLogger("pika") +pika_logger.setLevel(logging.WARNING) # disables non-informative pika log clutter + + +class BaseQueueManager: + def __init__(self, settings: Dynaconf): + validate_settings(settings, queue_manager_validators) + + self.connection_parameters = self.create_connection_parameters(settings) + self.connection: Union[BlockingConnection, None] = None + self.channel: Union[BlockingChannel, None] = None + self.connection_sleep = settings.rabbitmq.connection_sleep + self.queue_expiration_time = settings.rabbitmq.queue_expiration_time + self.tenant_exchange_name = settings.rabbitmq.tenant_exchange_name + + tenant_ids 
= [] + + atexit.register(self.stop_consuming) + signal.signal(signal.SIGTERM, self._handle_stop_signal) + signal.signal(signal.SIGINT, self._handle_stop_signal) + + @staticmethod + def create_connection_parameters(settings: Dynaconf): + credentials = pika.PlainCredentials(username=settings.rabbitmq.username, password=settings.rabbitmq.password) + pika_connection_params = { + "host": settings.rabbitmq.host, + "port": settings.rabbitmq.port, + "credentials": credentials, + "heartbeat": settings.rabbitmq.heartbeat, + } + return pika.ConnectionParameters(**pika_connection_params) + + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) + def establish_connection(self): + if self.connection and self.connection.is_open: + logger.debug("Connection to RabbitMQ already established.") + return + + logger.info("Establishing connection to RabbitMQ...") + self.connection = pika.BlockingConnection(parameters=self.connection_parameters) + + logger.debug("Opening channel...") + self.channel = self.connection.channel() + self.channel.basic_qos(prefetch_count=1) + self.initialize_queues() + + logger.info("Connection to RabbitMQ established, channel open.") + logger.info("Starting to consume messages...") + Thread(target=self.channel.start_consuming).start() + + def initialize_queues(self): + raise NotImplementedError("Subclasses should implement this method") + + def stop_consuming(self): + if self.channel and self.channel.is_open: + logger.info("Stopping consuming...") + self.channel.stop_consuming() + logger.info("Closing channel...") + self.channel.close() + + if self.connection and self.connection.is_open: + logger.info("Closing connection to RabbitMQ...") + self.connection.close() + + def _handle_stop_signal(self, signum, *args, **kwargs): + logger.info(f"Received signal {signum}, stopping consuming...") + self.stop_consuming() + sys.exit(0) + + +class TenantQueueManager(BaseQueueManager): + def __init__(self, settings: Dynaconf): + super().__init__(settings) + + 
self.tenant_created_queue_name = self.get_tenant_created_queue_name(settings) + self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) + self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) + + self.tenant_ids = [] + + def initialize_queues(self): + self.channel.exchange_declare(exchange=self.tenant_exchange_name, exchange_type="topic") + + self.channel.queue_declare( + queue=self.tenant_created_queue_name, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.tenant_events_dlq_name, + "x-expires": self.queue_expiration_time, + }, + durable=True, + ) + self.channel.queue_declare( + queue=self.tenant_deleted_queue_name, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.tenant_events_dlq_name, + "x-expires": self.queue_expiration_time, + }, + durable=True, + ) + self.channel.queue_declare( + queue=self.tenant_events_dlq_name, + arguments={"x-expires": self.queue_expiration_time}, + durable=True, + ) + + self.channel.queue_bind( + exchange=self.tenant_exchange_name, queue=self.tenant_created_queue_name, routing_key="tenant.created" + ) + self.channel.queue_bind( + exchange=self.tenant_exchange_name, queue=self.tenant_deleted_queue_name, routing_key="tenant.delete" + ) + + self.channel.basic_consume(queue=self.tenant_created_queue_name, on_message_callback=self.on_tenant_created) + self.channel.basic_consume(queue=self.tenant_deleted_queue_name, on_message_callback=self.on_tenant_deleted) + + def get_tenant_created_queue_name(self, settings: Dynaconf): + return self.get_queue_name_with_suffix( + suffix=settings.rabbitmq.tenant_created_event_queue_suffix, pod_name=settings.kubernetes.pod_name + ) + + def get_tenant_deleted_queue_name(self, settings: Dynaconf): + return self.get_queue_name_with_suffix( + suffix=settings.rabbitmq.tenant_deleted_event_queue_suffix, pod_name=settings.kubernetes.pod_name + ) + + def get_tenant_events_dlq_name(self, settings: Dynaconf): + 
return self.get_queue_name_with_suffix( + suffix=settings.rabbitmq.tenant_event_dlq_suffix, pod_name=settings.kubernetes.pod_name + ) + + def get_queue_name_with_suffix(self, suffix: str, pod_name: str): + if not self.use_default_queue_name() and pod_name: + return f"{pod_name}{suffix}" + return self.get_default_queue_name() + + def use_default_queue_name(self): + return False + + def get_default_queue_name(self): + raise NotImplementedError("Queue name method not implemented") + + def on_tenant_created(self, ch: Channel, method, properties, body): + logger.info("Received tenant created event") + message = json.loads(body) + logger.info(f"Tenant Created: {message}") + ch.basic_ack(delivery_tag=method.delivery_tag) + + #TODO: replace this w/ working callback + tenant_id = body["tenant_id"] + self.tenant_ids.append(tenant_id) + + def on_tenant_deleted(self, ch, method, properties, body): + logger.info("Received tenant deleted event") + message = json.loads(body) + logger.info(f"Tenant Deleted: {message}") + ch.basic_ack(delivery_tag=method.delivery_tag) + + #TODO: replace this w/ working callback + tenant_id = body["tenant_id"] + self.tenant_ids.remove(tenant_id) + + +class ServiceQueueManager(BaseQueueManager): + def __init__(self, settings: Dynaconf): + super().__init__(settings) + + self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name + self.service_response_exchange_name = settings.rabbitmq.service_response_exchange_name + self.service_queue_prefix = settings.rabbitmq.service_request_queue_prefix + self.service_dlq_name = settings.rabbitmq.service_dlq_name + + def initialize_queues(self): + self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="topic") + queue_name = self.service_queue_prefix + "default" + self.channel.queue_declare(queue=queue_name, arguments={"x-max-priority": 2}) + self.channel.queue_bind(exchange=self.service_request_exchange_name, queue=queue_name) + + def 
start_consuming(self): + self.channel.queue_declare(queue=self.service_queue_prefix + "default") + + self.channel.basic_consume( + queue=self.service_queue_prefix + "default", + on_message_callback=self.react_to_service_request, + auto_ack=True, + ) + + logger.info("Starting to consume messages...") + self.channel.start_consuming() + + def add_tenant_queue(self, tenant_id: str): + queue_name = self.service_queue_prefix + "_" + tenant_id + self.channel.queue_declare(queue_name, durable=True) + self.channel.queue_bind(queue_name, self.service_request_exchange_name) + + def delete_tenant_queue(self, tenant_id: str): + queue_name = self.service_queue_prefix + "_" + tenant_id + self.channel.queue_unbind(queue_name, self.service_request_exchange_name) + self.channel.queue_delete(queue_name) + + def react_to_service_request(self, ch, method, properties, body): + logger.info("Received service request") + message = json.loads(body) + logger.info(f"Service Request: {message}") + ch.basic_ack(delivery_tag=method.delivery_tag) + From 7624208188cc4e386497060d9b705ede5b60fe88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Mon, 1 Jul 2024 18:15:04 +0200 Subject: [PATCH 04/35] feat: wip for multiple tenants --- pyinfra/queue/multiple_tenants.py | 271 +++++++++++++++++++++++++----- 1 file changed, 232 insertions(+), 39 deletions(-) diff --git a/pyinfra/queue/multiple_tenants.py b/pyinfra/queue/multiple_tenants.py index 5d0b948..a3eac52 100644 --- a/pyinfra/queue/multiple_tenants.py +++ b/pyinfra/queue/multiple_tenants.py @@ -1,15 +1,20 @@ import atexit +import concurrent.futures import pika import os import json import logging import signal import sys +import requests +import time +import pika.exceptions from threading import Thread from dynaconf import Dynaconf from typing import Callable, Union from kn_utils.logging import logger from pika.adapters.blocking_connection import BlockingChannel, BlockingConnection +from pika.adapters.select_connection import 
SelectConnection from pika.channel import Channel from retry import retry @@ -20,20 +25,22 @@ from pyinfra.config.validators import queue_manager_validators pika_logger = logging.getLogger("pika") pika_logger.setLevel(logging.WARNING) # disables non-informative pika log clutter +MessageProcessor = Callable[[dict], dict] + class BaseQueueManager: + tenant_ids = [] + def __init__(self, settings: Dynaconf): validate_settings(settings, queue_manager_validators) self.connection_parameters = self.create_connection_parameters(settings) - self.connection: Union[BlockingConnection, None] = None - self.channel: Union[BlockingChannel, None] = None + self.connection = None + self.channel = None self.connection_sleep = settings.rabbitmq.connection_sleep self.queue_expiration_time = settings.rabbitmq.queue_expiration_time self.tenant_exchange_name = settings.rabbitmq.tenant_exchange_name - tenant_ids = [] - atexit.register(self.stop_consuming) signal.signal(signal.SIGTERM, self._handle_stop_signal) signal.signal(signal.SIGINT, self._handle_stop_signal) @@ -49,23 +56,54 @@ class BaseQueueManager: } return pika.ConnectionParameters(**pika_connection_params) - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) + # @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) + # def establish_connection(self): + # if self.connection and self.connection.is_open: + # logger.debug("Connection to RabbitMQ already established.") + # return + + # logger.info("Establishing connection to RabbitMQ...") + # self.connection = pika.BlockingConnection(parameters=self.connection_parameters) + + # logger.debug("Opening channel...") + # self.channel = self.connection.channel() + # self.channel.basic_qos(prefetch_count=1) + # self.initialize_queues() + + # logger.info("Connection to RabbitMQ established, channel open.") + def establish_connection(self): if self.connection and self.connection.is_open: logger.debug("Connection to RabbitMQ already established.") return logger.info("Establishing 
connection to RabbitMQ...") - self.connection = pika.BlockingConnection(parameters=self.connection_parameters) + self.connection = SelectConnection(parameters=self.connection_parameters, + on_open_callback=self.on_connection_open, + on_open_error_callback=self.on_connection_open_error, + on_close_callback=self.on_connection_close) + + def on_connection_open(self, unused_connection): + logger.debug("Connection opened") + self.connection.channel(on_open_callback=self.on_channel_open) - logger.debug("Opening channel...") - self.channel = self.connection.channel() + def on_connection_open_error(self, unused_connection, err): + logger.error(f"Connection open failed, reopening in {self.connection_sleep} seconds: {err}") + self.connection.ioloop.call_later(self.connection_sleep, self.establish_connection) + + def on_connection_close(self, unused_connection, reason): + logger.warning(f"Connection closed, reopening in {self.connection_sleep} seconds: {reason}") + self.connection.ioloop.call_later(self.connection_sleep, self.establish_connection) + + def on_channel_open(self, channel): + logger.debug("Channel opened") + self.channel = channel self.channel.basic_qos(prefetch_count=1) self.initialize_queues() - logger.info("Connection to RabbitMQ established, channel open.") - logger.info("Starting to consume messages...") - Thread(target=self.channel.start_consuming).start() + def is_ready(self): + self.establish_connection() + return self.channel.is_open def initialize_queues(self): raise NotImplementedError("Subclasses should implement this method") @@ -94,8 +132,11 @@ class TenantQueueManager(BaseQueueManager): self.tenant_created_queue_name = self.get_tenant_created_queue_name(settings) self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) + self.event_handlers = {"tenant_created": [], "tenant_deleted": []} - self.tenant_ids = [] + TenantQueueManager.tenant_ids = 
self.get_initial_tenant_ids( + tenant_endpoint_url=settings.storage.tenant_server.endpoint + ) def initialize_queues(self): self.channel.exchange_declare(exchange=self.tenant_exchange_name, exchange_type="topic") @@ -134,6 +175,21 @@ class TenantQueueManager(BaseQueueManager): self.channel.basic_consume(queue=self.tenant_created_queue_name, on_message_callback=self.on_tenant_created) self.channel.basic_consume(queue=self.tenant_deleted_queue_name, on_message_callback=self.on_tenant_deleted) + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=requests.exceptions.HTTPError) + def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> list: + try: + response = requests.get(tenant_endpoint_url, timeout=10) + response.raise_for_status() # Raise an HTTPError for bad responses + + if response.headers["content-type"].lower() == "application/json": + tenant_ids = [tenant["tenantId"] for tenant in response.json()] + else: + logger.warning("Response is not in JSON format.") + except Exception as e: + logger.warning("An unexpected error occurred:", e) + + return tenant_ids + def get_tenant_created_queue_name(self, settings: Dynaconf): return self.get_queue_name_with_suffix( suffix=settings.rabbitmq.tenant_created_event_queue_suffix, pod_name=settings.kubernetes.pod_name @@ -166,9 +222,10 @@ class TenantQueueManager(BaseQueueManager): logger.info(f"Tenant Created: {message}") ch.basic_ack(delivery_tag=method.delivery_tag) - #TODO: replace this w/ working callback - tenant_id = body["tenant_id"] - self.tenant_ids.append(tenant_id) + # TODO: test callback + tenant_id = body["tenantId"] + TenantQueueManager.tenant_ids.append(tenant_id) + self._trigger_event("tenant_created", tenant_id) def on_tenant_deleted(self, ch, method, properties, body): logger.info("Received tenant deleted event") @@ -176,13 +233,38 @@ class TenantQueueManager(BaseQueueManager): logger.info(f"Tenant Deleted: {message}") ch.basic_ack(delivery_tag=method.delivery_tag) - #TODO: replace this 
w/ working callback - tenant_id = body["tenant_id"] - self.tenant_ids.remove(tenant_id) + # TODO: test callback + tenant_id = body["tenantId"] + TenantQueueManager.tenant_ids.remove(tenant_id) + self._trigger_event("tenant_deleted", tenant_id) + + def _trigger_event(self, event_type, tenant_id): + handler = self.event_handlers.get(event_type) + if handler: + try: + handler(tenant_id) + except Exception as e: + logger.error(f"Error in event handler for {event_type}: {e}", exc_info=True) + + def add_event_handler(self, event_type: str, handler: Callable[[str], None]): + if event_type in self.event_handlers: + self.event_handlers[event_type] = handler + else: + logger.warning(f"Unknown event type: {event_type}") + + def purge_queues(self): + self.establish_connection() + try: + self.channel.queue_purge(self.tenant_created_queue_name) + self.channel.queue_purge(self.tenant_deleted_queue_name) + self.channel.queue_purge(self.tenant_events_dlq_name) + logger.info("Queues purged.") + except pika.exceptions.ChannelWrongStateError: + pass class ServiceQueueManager(BaseQueueManager): - def __init__(self, settings: Dynaconf): + def __init__(self, settings: Dynaconf, tenant_manager: TenantQueueManager): super().__init__(settings) self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name @@ -190,37 +272,148 @@ class ServiceQueueManager(BaseQueueManager): self.service_queue_prefix = settings.rabbitmq.service_request_queue_prefix self.service_dlq_name = settings.rabbitmq.service_dlq_name + tenant_manager.add_event_handler("tenant_created", self.add_tenant_queue) + tenant_manager.add_event_handler("tenant_deleted", self.delete_tenant_queue) + def initialize_queues(self): - self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="topic") - queue_name = self.service_queue_prefix + "default" - self.channel.queue_declare(queue=queue_name, arguments={"x-max-priority": 2}) - 
self.channel.queue_bind(exchange=self.service_request_exchange_name, queue=queue_name) + self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="direct") + self.channel.exchange_declare(exchange=self.service_response_exchange_name, exchange_type="direct") + + for tenant_id in ServiceQueueManager.tenant_ids: + queue_name = self.service_queue_prefix + "_" + tenant_id + self.channel.queue_declare( + queue=queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dlq_name, + "x-expires": self.queue_expiration_time, # TODO: check if necessary + "x-max-priority": 2, + }, + ) + self.channel.queue_bind(queue_name, self.service_request_exchange_name) def start_consuming(self): - self.channel.queue_declare(queue=self.service_queue_prefix + "default") + for tenant_id in ServiceQueueManager.tenant_ids: + queue_name = self.service_queue_prefix + "_" + tenant_id + message_callback = self._make_on_message_callback(message_processor=MessageProcessor, tenant_id=tenant_id) + self.channel.basic_consume( + queue=queue_name, + on_message_callback=message_callback, + ) + logger.info(f"Starting to consume messages for queue {queue_name}...") - self.channel.basic_consume( - queue=self.service_queue_prefix + "default", - on_message_callback=self.react_to_service_request, - auto_ack=True, - ) - - logger.info("Starting to consume messages...") self.channel.start_consuming() + self.connection.ioloop.start() + + def publish_message_to_input_queue( + self, tenant_id: str, message: Union[str, bytes, dict], properties: pika.BasicProperties = None + ): + if isinstance(message, str): + message = message.encode("utf-8") + elif isinstance(message, dict): + message = json.dumps(message).encode("utf-8") + + self.establish_connection() + self.channel.basic_publish( + exchange=self.service_request_exchange_name, + routing_key=tenant_id, + properties=properties, + body=message, + ) + logger.info(f"Published 
message to queue {tenant_id}.") + + def purge_queues(self): + self.establish_connection() + try: + for tenant_id in ServiceQueueManager.tenant_ids: + queue_name = self.service_queue_prefix + "_" + tenant_id + self.channel.queue_purge(queue_name) + logger.info("Queues purged.") + except pika.exceptions.ChannelWrongStateError: + pass def add_tenant_queue(self, tenant_id: str): queue_name = self.service_queue_prefix + "_" + tenant_id - self.channel.queue_declare(queue_name, durable=True) - self.channel.queue_bind(queue_name, self.service_request_exchange_name) + self.channel.queue_declare( + queue=queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dlq_name, + "x-expires": self.queue_expiration_time, # TODO: check if necessary + }, + ) + self.channel.queue_bind(queue=queue_name, exchange=self.service_request_exchange_name) + # TODO: this is likely not possible due to blocking connection + message_callback = self._make_on_message_callback(message_processor=MessageProcessor, tenant_id=tenant_id) + self.channel.basic_consume( + queue=queue_name, + on_message_callback=message_callback, + ) def delete_tenant_queue(self, tenant_id: str): queue_name = self.service_queue_prefix + "_" + tenant_id self.channel.queue_unbind(queue_name, self.service_request_exchange_name) self.channel.queue_delete(queue_name) - def react_to_service_request(self, ch, method, properties, body): - logger.info("Received service request") - message = json.loads(body) - logger.info(f"Service Request: {message}") - ch.basic_ack(delivery_tag=method.delivery_tag) - + def _make_on_message_callback(self, message_processor: MessageProcessor, tenant_id: str): + def process_message_body_and_await_result(unpacked_message_body): + # Processing the message in a separate thread is necessary for the main thread pika client to be able to + # process data events (e.g. heartbeats) while the message is being processed. 
+ with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor: + logger.info("Processing payload in separate thread.") + future = thread_pool_executor.submit(message_processor, unpacked_message_body) + + # TODO: This block is probably not necessary, but kept since the implications of removing it are + # unclear. Remove it in a future iteration where less changes are being made to the code base. + # while future.running(): + # logger.debug("Waiting for payload processing to finish...") + # self.connection.sleep(self.connection_sleep) + + return future.result() + + def on_message_callback(channel, method, properties, body): + logger.info(f"Received message from queue with delivery_tag {method.delivery_tag}.") + + if method.redelivered: + logger.warning(f"Declining message with {method.delivery_tag=} due to it being redelivered.") + channel.basic_nack(method.delivery_tag, requeue=False) + return + + if body.decode("utf-8") == "STOP": + logger.info("Received stop signal, stopping consuming...") + channel.basic_ack(delivery_tag=method.delivery_tag) + self.stop_consuming() + return + + try: + filtered_message_headers = ( + {k: v for k, v in properties.headers.items() if k.lower().startswith("x-")} + if properties.headers + else {} + ) + logger.debug(f"Processing message with {filtered_message_headers=}.") + result: dict = ( + process_message_body_and_await_result({**json.loads(body), **filtered_message_headers}) or {} + ) + + channel.basic_publish( + exchange=self.service_request_exchange_name, + routing_key=tenant_id, + body=json.dumps(result).encode(), + properties=pika.BasicProperties(headers=filtered_message_headers), + ) + logger.info(f"Published result to queue {tenant_id}.") + + channel.basic_ack(delivery_tag=method.delivery_tag) + logger.debug(f"Message with {method.delivery_tag=} acknowledged.") + except FileNotFoundError as e: + logger.warning(f"{e}, declining message with {method.delivery_tag=}.") + channel.basic_nack(method.delivery_tag, 
requeue=False) + except Exception: + logger.warning(f"Failed to process message with {method.delivery_tag=}, declining...", exc_info=True) + channel.basic_nack(method.delivery_tag, requeue=False) + raise + + return on_message_callback From 30330937ce1a2fa5278c9c3c5966885f12e06218 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Tue, 2 Jul 2024 18:07:23 +0200 Subject: [PATCH 05/35] feat: wip for multiple tenants --- pyinfra/examples.py | 6 +- pyinfra/queue/multiple_tenants.py | 95 ++++++-- pyinfra/queue/sequential_tenants.py | 342 ++++++++++++++++++++++++++++ scripts/send_request.py | 5 +- 4 files changed, 420 insertions(+), 28 deletions(-) create mode 100644 pyinfra/queue/sequential_tenants.py diff --git a/pyinfra/examples.py b/pyinfra/examples.py index 6b62f98..a3fb9be 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -4,7 +4,8 @@ from kn_utils.logging import logger from pyinfra.config.loader import get_pyinfra_validators, validate_settings from pyinfra.queue.callback import Callback -from pyinfra.queue.manager import QueueManager +# from pyinfra.queue.manager import QueueManager +from pyinfra.queue.sequential_tenants import QueueManager from pyinfra.utils.opentelemetry import instrument_pika, setup_trace, instrument_app from pyinfra.webserver.prometheus import ( add_prometheus_endpoint, @@ -52,4 +53,5 @@ def start_standard_queue_consumer( webserver_thread = create_webserver_thread_from_settings(app, settings) webserver_thread.start() - queue_manager.start_consuming(callback) \ No newline at end of file + # queue_manager.start_consuming(callback) + queue_manager.start_sequential_consume(callback) \ No newline at end of file diff --git a/pyinfra/queue/multiple_tenants.py b/pyinfra/queue/multiple_tenants.py index a3eac52..156736f 100644 --- a/pyinfra/queue/multiple_tenants.py +++ b/pyinfra/queue/multiple_tenants.py @@ -1,19 +1,16 @@ import atexit +import asyncio import concurrent.futures import pika -import os import json import 
logging import signal import sys import requests -import time import pika.exceptions -from threading import Thread from dynaconf import Dynaconf from typing import Callable, Union from kn_utils.logging import logger -from pika.adapters.blocking_connection import BlockingChannel, BlockingConnection from pika.adapters.select_connection import SelectConnection from pika.channel import Channel from retry import retry @@ -21,7 +18,6 @@ from retry import retry from pyinfra.config.loader import validate_settings from pyinfra.config.validators import queue_manager_validators - pika_logger = logging.getLogger("pika") pika_logger.setLevel(logging.WARNING) # disables non-informative pika log clutter @@ -72,19 +68,37 @@ class BaseQueueManager: # logger.info("Connection to RabbitMQ established, channel open.") + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) def establish_connection(self): if self.connection and self.connection.is_open: logger.debug("Connection to RabbitMQ already established.") return logger.info("Establishing connection to RabbitMQ...") - self.connection = SelectConnection(parameters=self.connection_parameters, + return SelectConnection(parameters=self.connection_parameters, on_open_callback=self.on_connection_open, on_open_error_callback=self.on_connection_open_error, on_close_callback=self.on_connection_close) + def close_connection(self): + # self._consuming = False + if self.connection.is_closing or self.connection.is_closed: + logger.info('Connection is closing or already closed') + else: + logger.info('Closing connection') + self.connection.close() + def on_connection_open(self, unused_connection): logger.debug("Connection opened") + self.open_channel() + + def open_channel(self): + """Open a new channel with RabbitMQ by issuing the Channel.Open RPC + command. When RabbitMQ responds that the channel is open, the + on_channel_open callback will be invoked by pika. 
+ + """ + logger.debug('Creating a new channel') self.connection.channel(on_open_callback=self.on_channel_open) def on_connection_open_error(self, unused_connection, err): @@ -98,9 +112,33 @@ class BaseQueueManager: def on_channel_open(self, channel): logger.debug("Channel opened") self.channel = channel + # self.add_on_channel_close_callback() self.channel.basic_qos(prefetch_count=1) self.initialize_queues() + + # def add_on_channel_close_callback(self): + # """This method tells pika to call the on_channel_closed method if + # RabbitMQ unexpectedly closes the channel. + + # """ + # logger.debug('Adding channel close callback') + # self.channel.add_on_close_callback(self.on_channel_closed) + + # def on_channel_closed(self, channel, reason): + # """Invoked by pika when RabbitMQ unexpectedly closes the channel. + # Channels are usually closed if you attempt to do something that + # violates the protocol, such as re-declare an exchange or queue with + # different parameters. In this case, we'll close the connection + # to shutdown the object. 
+ + # :param pika.channel.Channel: The closed channel + # :param Exception reason: why the channel was closed + + # """ + # logger.warning('Channel %i was closed: %s', channel, reason) + # self.close_connection() + def is_ready(self): self.establish_connection() return self.channel.is_open @@ -134,7 +172,7 @@ class TenantQueueManager(BaseQueueManager): self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) self.event_handlers = {"tenant_created": [], "tenant_deleted": []} - TenantQueueManager.tenant_ids = self.get_initial_tenant_ids( + self.get_initial_tenant_ids( tenant_endpoint_url=settings.storage.tenant_server.endpoint ) @@ -146,7 +184,6 @@ class TenantQueueManager(BaseQueueManager): arguments={ "x-dead-letter-exchange": "", "x-dead-letter-routing-key": self.tenant_events_dlq_name, - "x-expires": self.queue_expiration_time, }, durable=True, ) @@ -155,13 +192,11 @@ class TenantQueueManager(BaseQueueManager): arguments={ "x-dead-letter-exchange": "", "x-dead-letter-routing-key": self.tenant_events_dlq_name, - "x-expires": self.queue_expiration_time, }, durable=True, ) self.channel.queue_declare( queue=self.tenant_events_dlq_name, - arguments={"x-expires": self.queue_expiration_time}, durable=True, ) @@ -175,6 +210,11 @@ class TenantQueueManager(BaseQueueManager): self.channel.basic_consume(queue=self.tenant_created_queue_name, on_message_callback=self.on_tenant_created) self.channel.basic_consume(queue=self.tenant_deleted_queue_name, on_message_callback=self.on_tenant_deleted) + def start(self): + self.connection = self.establish_connection() + if self.connection is not None: + self.connection.ioloop.start() + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=requests.exceptions.HTTPError) def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> list: try: @@ -182,13 +222,13 @@ class TenantQueueManager(BaseQueueManager): response.raise_for_status() # Raise an HTTPError for bad responses if 
response.headers["content-type"].lower() == "application/json": - tenant_ids = [tenant["tenantId"] for tenant in response.json()] + tenants = [tenant["tenantId"] for tenant in response.json()] else: logger.warning("Response is not in JSON format.") except Exception as e: logger.warning("An unexpected error occurred:", e) - return tenant_ids + self.tenant_ids.extend(tenants) def get_tenant_created_queue_name(self, settings: Dynaconf): return self.get_queue_name_with_suffix( @@ -224,10 +264,10 @@ class TenantQueueManager(BaseQueueManager): # TODO: test callback tenant_id = body["tenantId"] - TenantQueueManager.tenant_ids.append(tenant_id) + self.tenant_ids.append(tenant_id) self._trigger_event("tenant_created", tenant_id) - def on_tenant_deleted(self, ch, method, properties, body): + def on_tenant_deleted(self, ch: Channel, method, properties, body): logger.info("Received tenant deleted event") message = json.loads(body) logger.info(f"Tenant Deleted: {message}") @@ -235,7 +275,7 @@ class TenantQueueManager(BaseQueueManager): # TODO: test callback tenant_id = body["tenantId"] - TenantQueueManager.tenant_ids.remove(tenant_id) + self.tenant_ids.remove(tenant_id) self._trigger_event("tenant_deleted", tenant_id) def _trigger_event(self, event_type, tenant_id): @@ -279,7 +319,7 @@ class ServiceQueueManager(BaseQueueManager): self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="direct") self.channel.exchange_declare(exchange=self.service_response_exchange_name, exchange_type="direct") - for tenant_id in ServiceQueueManager.tenant_ids: + for tenant_id in self.tenant_ids: queue_name = self.service_queue_prefix + "_" + tenant_id self.channel.queue_declare( queue=queue_name, @@ -293,18 +333,22 @@ class ServiceQueueManager(BaseQueueManager): ) self.channel.queue_bind(queue_name, self.service_request_exchange_name) - def start_consuming(self): - for tenant_id in ServiceQueueManager.tenant_ids: + def start_consuming(self, message_processor: 
Callable): + self.connection = self.establish_connection() + for tenant_id in self.tenant_ids: queue_name = self.service_queue_prefix + "_" + tenant_id - message_callback = self._make_on_message_callback(message_processor=MessageProcessor, tenant_id=tenant_id) + message_callback = self._make_on_message_callback(message_processor=message_processor, tenant_id=tenant_id) self.channel.basic_consume( queue=queue_name, on_message_callback=message_callback, ) logger.info(f"Starting to consume messages for queue {queue_name}...") - self.channel.start_consuming() - self.connection.ioloop.start() + # self.channel.start_consuming() + if self.connection is not None: + self.connection.ioloop.start() + else: + logger.info("Connection is None, cannot start ioloop") def publish_message_to_input_queue( self, tenant_id: str, message: Union[str, bytes, dict], properties: pika.BasicProperties = None @@ -326,7 +370,7 @@ class ServiceQueueManager(BaseQueueManager): def purge_queues(self): self.establish_connection() try: - for tenant_id in ServiceQueueManager.tenant_ids: + for tenant_id in self.tenant_ids: queue_name = self.service_queue_prefix + "_" + tenant_id self.channel.queue_purge(queue_name) logger.info("Queues purged.") @@ -371,7 +415,10 @@ class ServiceQueueManager(BaseQueueManager): # logger.debug("Waiting for payload processing to finish...") # self.connection.sleep(self.connection_sleep) - return future.result() + loop = asyncio.get_event_loop() + return loop.run_in_executor(None, future.result) + + # return future.result() def on_message_callback(channel, method, properties, body): logger.info(f"Received message from queue with delivery_tag {method.delivery_tag}.") @@ -416,4 +463,4 @@ class ServiceQueueManager(BaseQueueManager): channel.basic_nack(method.delivery_tag, requeue=False) raise - return on_message_callback + return on_message_callback \ No newline at end of file diff --git a/pyinfra/queue/sequential_tenants.py b/pyinfra/queue/sequential_tenants.py new file mode 
100644 index 0000000..e0514a2 --- /dev/null +++ b/pyinfra/queue/sequential_tenants.py @@ -0,0 +1,342 @@ +import atexit +import concurrent.futures +import json +import logging +import requests +import signal +import sys +import time +from typing import Callable, Union + +import pika +import pika.exceptions +from dynaconf import Dynaconf +from kn_utils.logging import logger +from pika.adapters.blocking_connection import BlockingChannel, BlockingConnection +from retry import retry + +from pyinfra.config.loader import validate_settings +from pyinfra.config.validators import queue_manager_validators + +logger.set_level("DEBUG") +pika_logger = logging.getLogger("pika") +pika_logger.setLevel(logging.WARNING) # disables non-informative pika log clutter + +MessageProcessor = Callable[[dict], dict] + + +class QueueManager: + def __init__(self, settings: Dynaconf): + validate_settings(settings, queue_manager_validators) + + self.tenant_created_queue_name = self.get_tenant_created_queue_name(settings) + self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) + self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) + + self.connection_sleep = settings.rabbitmq.connection_sleep + self.queue_expiration_time = settings.rabbitmq.queue_expiration_time + + self.tenant_exchange_name = settings.rabbitmq.tenant_exchange_name + self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name + self.service_response_exchange_name = settings.rabbitmq.service_response_exchange_name + + self.service_queue_prefix = settings.rabbitmq.service_request_queue_prefix + self.service_dlq_name = settings.rabbitmq.service_dlq_name + + self.connection_parameters = self.create_connection_parameters(settings) + + self.connection: Union[BlockingConnection, None] = None + self.channel: Union[BlockingChannel, None] = None + + self.tenant_ids = self.get_initial_tenant_ids(tenant_endpoint_url=settings.storage.tenant_server.endpoint) + + self._consuming 
= False + + atexit.register(self.stop_consuming) + signal.signal(signal.SIGTERM, self._handle_stop_signal) + signal.signal(signal.SIGINT, self._handle_stop_signal) + + @staticmethod + def create_connection_parameters(settings: Dynaconf): + credentials = pika.PlainCredentials(username=settings.rabbitmq.username, password=settings.rabbitmq.password) + pika_connection_params = { + "host": settings.rabbitmq.host, + "port": settings.rabbitmq.port, + "credentials": credentials, + "heartbeat": settings.rabbitmq.heartbeat, + } + + return pika.ConnectionParameters(**pika_connection_params) + + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=requests.exceptions.HTTPError) + def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> list: + try: + response = requests.get(tenant_endpoint_url, timeout=10) + response.raise_for_status() # Raise an HTTPError for bad responses + + if response.headers["content-type"].lower() == "application/json": + tenants = [tenant["tenantId"] for tenant in response.json()] + else: + logger.warning("Response is not in JSON format.") + except Exception as e: + logger.warning("An unexpected error occurred:", e) + + return tenants + + def get_tenant_created_queue_name(self, settings: Dynaconf): + return self.get_queue_name_with_suffix( + suffix=settings.rabbitmq.tenant_created_event_queue_suffix, pod_name=settings.kubernetes.pod_name + ) + + def get_tenant_deleted_queue_name(self, settings: Dynaconf): + return self.get_queue_name_with_suffix( + suffix=settings.rabbitmq.tenant_deleted_event_queue_suffix, pod_name=settings.kubernetes.pod_name + ) + + def get_tenant_events_dlq_name(self, settings: Dynaconf): + return self.get_queue_name_with_suffix( + suffix=settings.rabbitmq.tenant_event_dlq_suffix, pod_name=settings.kubernetes.pod_name + ) + + def get_queue_name_with_suffix(self, suffix: str, pod_name: str): + if not self.use_default_queue_name() and pod_name: + return f"{pod_name}{suffix}" + return self.get_default_queue_name() + 
+ def use_default_queue_name(self): + return False + + def get_default_queue_name(self): + raise NotImplementedError("Queue name method not implemented") + + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) + def establish_connection(self): + # TODO: set sensible retry parameters + if self.connection and self.connection.is_open: + logger.debug("Connection to RabbitMQ already established.") + return + + logger.info("Establishing connection to RabbitMQ...") + self.connection = pika.BlockingConnection(parameters=self.connection_parameters) + + logger.debug("Opening channel...") + self.channel = self.connection.channel() + self.channel.basic_qos(prefetch_count=1) + + args = { + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.tenant_events_dlq_name, + } + + ### Declare exchanges for tenants and responses + self.channel.exchange_declare(exchange=self.tenant_exchange_name, exchange_type="topic") + self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="direct") + self.channel.exchange_declare(exchange=self.service_response_exchange_name, exchange_type="direct") + + self.channel.queue_declare(self.tenant_created_queue_name, arguments=args, auto_delete=False, durable=True) + self.channel.queue_declare(self.tenant_deleted_queue_name, arguments=args, auto_delete=False, durable=True) + + self.channel.queue_bind( + exchange=self.tenant_exchange_name, queue=self.tenant_created_queue_name, routing_key="tenant.created" + ) + self.channel.queue_bind( + exchange=self.tenant_exchange_name, queue=self.tenant_deleted_queue_name, routing_key="tenant.delete" + ) + + for tenant_id in self.tenant_ids: + queue_name = self.service_queue_prefix + "_" + tenant_id + self.channel.queue_declare( + queue=queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dlq_name, + "x-expires": self.queue_expiration_time, # TODO: check if necessary + "x-max-priority": 2, + }, + ) + 
self.channel.queue_bind( + queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id + ) + + logger.info("Connection to RabbitMQ established, channel open.") + + def is_ready(self): + self.establish_connection() + return self.channel.is_open + + @retry(exceptions=pika.exceptions.AMQPConnectionError, tries=3, delay=5, jitter=(1, 3), logger=logger) + def start_sequential_consume(self, message_processor: Callable): + + self.establish_connection() + self._consuming = True + + try: + while self._consuming: + for tenant_id in self.tenant_ids: + queue_name = self.service_queue_prefix + "_" + tenant_id + method_frame, properties, body = self.channel.basic_get(queue_name) + if method_frame: + on_message_callback = self._make_on_message_callback(message_processor, tenant_id) + on_message_callback(self.channel, method_frame, properties, body) + else: + logger.debug("No message returned") + time.sleep(self.connection_sleep) + + ### Handle tenant events + self.check_tenant_created_queue() + self.check_tenant_deleted_queue() + + except KeyboardInterrupt: + logger.info("Exiting...") + finally: + self.stop_consuming() + + def check_tenant_created_queue(self): + while True: + method_frame, properties, body = self.channel.basic_get(self.tenant_created_queue_name) + if method_frame: + self.channel.basic_ack(delivery_tag=method_frame.delivery_tag) + message = json.loads(body) + tenant_id = message["tenantId"] + self.on_tenant_created(tenant_id) + else: + logger.debug("No more tenant created events.") + break + + def check_tenant_deleted_queue(self): + while True: + method_frame, properties, body = self.channel.basic_get(self.tenant_deleted_queue_name) + if method_frame: + self.channel.basic_ack(delivery_tag=method_frame.delivery_tag) + message = json.loads(body) + tenant_id = message["tenantId"] + self.on_tenant_deleted(tenant_id) + else: + logger.debug("No more tenant deleted events.") + break + + def on_tenant_created(self, tenant_id: str): + queue_name = 
self.service_queue_prefix + "_" + tenant_id + self.channel.queue_declare( + queue=queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dlq_name, + "x-expires": self.queue_expiration_time, # TODO: check if necessary + }, + ) + self.channel.queue_bind(queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) + self.tenant_ids.append(tenant_id) + + def on_tenant_deleted(self, tenant_id: str): + queue_name = self.service_queue_prefix + "_" + tenant_id + self.channel.queue_unbind(queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) + self.channel.queue_delete(queue_name) + self.tenant_ids.remove(tenant_id) + + def stop_consuming(self): + self._consuming = False + if self.channel and self.channel.is_open: + logger.info("Stopping consuming...") + self.channel.stop_consuming() + logger.info("Closing channel...") + self.channel.close() + + if self.connection and self.connection.is_open: + logger.info("Closing connection to RabbitMQ...") + self.connection.close() + + def publish_message_to_input_queue( + self, tenant_id: str, message: Union[str, bytes, dict], properties: pika.BasicProperties = None + ): + if isinstance(message, str): + message = message.encode("utf-8") + elif isinstance(message, dict): + message = json.dumps(message).encode("utf-8") + + self.establish_connection() + self.channel.basic_publish( + exchange=self.service_request_exchange_name, + routing_key=tenant_id, + properties=properties, + body=message, + ) + logger.info(f"Published message to queue {tenant_id}.") + + def purge_queues(self): + self.establish_connection() + try: + self.channel.queue_purge(self.tenant_created_queue_name) + self.channel.queue_purge(self.tenant_deleted_queue_name) + for tenant_id in self.tenant_ids: + queue_name = self.service_queue_prefix + "_" + tenant_id + self.channel.queue_purge(queue_name) + logger.info("Queues purged.") + except 
pika.exceptions.ChannelWrongStateError: + pass + + def get_message_from_output_queue(self, queue: str): + self.establish_connection() + return self.channel.basic_get(queue, auto_ack=True) + + def _make_on_message_callback(self, message_processor: MessageProcessor, tenant_id: str): + def process_message_body_and_await_result(unpacked_message_body): + # Processing the message in a separate thread is necessary for the main thread pika client to be able to + # process data events (e.g. heartbeats) while the message is being processed. + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor: + logger.info("Processing payload in separate thread.") + future = thread_pool_executor.submit(message_processor, unpacked_message_body) + + return future.result() + + def on_message_callback(channel, method, properties, body): + logger.info(f"Received message from queue with delivery_tag {method.delivery_tag}.") + + if method.redelivered: + logger.warning(f"Declining message with {method.delivery_tag=} due to it being redelivered.") + channel.basic_nack(method.delivery_tag, requeue=False) + return + + if body.decode("utf-8") == "STOP": + logger.info(f"Received stop signal, stopping consuming...") + channel.basic_ack(delivery_tag=method.delivery_tag) + self.stop_consuming() + return + + try: + filtered_message_headers = ( + {k: v for k, v in properties.headers.items() if k.lower().startswith("x-")} + if properties.headers + else {} + ) + logger.debug(f"Processing message with {filtered_message_headers=}.") + result: dict = ( + process_message_body_and_await_result({**json.loads(body), **filtered_message_headers}) or {} + ) + + channel.basic_publish( + exchange=self.service_response_exchange_name, + routing_key=tenant_id, + body=json.dumps(result).encode(), + properties=pika.BasicProperties(headers=filtered_message_headers), + ) + logger.info(f"Published result to queue {tenant_id}.") + + channel.basic_ack(delivery_tag=method.delivery_tag) + 
logger.debug(f"Message with {method.delivery_tag=} acknowledged.") + except FileNotFoundError as e: + logger.warning(f"{e}, declining message with {method.delivery_tag=}.") + channel.basic_nack(method.delivery_tag, requeue=False) + except Exception: + logger.warning(f"Failed to process message with {method.delivery_tag=}, declining...", exc_info=True) + channel.basic_nack(method.delivery_tag, requeue=False) + raise + + return on_message_callback + + def _handle_stop_signal(self, signum, *args, **kwargs): + logger.info(f"Received signal {signum}, stopping consuming...") + self.stop_consuming() + sys.exit(0) diff --git a/scripts/send_request.py b/scripts/send_request.py index d1f1fda..c7d2046 100644 --- a/scripts/send_request.py +++ b/scripts/send_request.py @@ -5,7 +5,8 @@ from operator import itemgetter from kn_utils.logging import logger from pyinfra.config.loader import load_settings, local_pyinfra_root_path -from pyinfra.queue.manager import QueueManager +# from pyinfra.queue.manager import QueueManager +from pyinfra.queue.sequential_tenants import QueueManager from pyinfra.storage.storages.s3 import get_s3_storage_from_settings settings = load_settings(local_pyinfra_root_path / "config/") @@ -41,7 +42,7 @@ def main(): message = upload_json_and_make_message_body() - queue_manager.publish_message_to_input_queue(message) + queue_manager.publish_message_to_input_queue(tenant_id="redaction", message=message) logger.info(f"Put {message} on {settings.rabbitmq.input_queue}.") storage = get_s3_storage_from_settings(settings) From c81d967aeec609365d097b0364e4dcb9044ac3a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Wed, 3 Jul 2024 17:51:47 +0200 Subject: [PATCH 06/35] feat: wip for multiple tenants --- pyinfra/examples.py | 19 +- pyinfra/queue/sequential_tenants.py | 1 + ...ultiple_tenants.py => threaded_tenants.py} | 287 +++++++----------- 3 files changed, 129 insertions(+), 178 deletions(-) rename pyinfra/queue/{multiple_tenants.py => 
threaded_tenants.py} (62%) diff --git a/pyinfra/examples.py b/pyinfra/examples.py index a3fb9be..8856c5b 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -1,11 +1,13 @@ from dynaconf import Dynaconf from fastapi import FastAPI from kn_utils.logging import logger - +import multiprocessing +from threading import Thread from pyinfra.config.loader import get_pyinfra_validators, validate_settings from pyinfra.queue.callback import Callback # from pyinfra.queue.manager import QueueManager from pyinfra.queue.sequential_tenants import QueueManager +from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager from pyinfra.utils.opentelemetry import instrument_pika, setup_trace, instrument_app from pyinfra.webserver.prometheus import ( add_prometheus_endpoint, @@ -35,7 +37,8 @@ def start_standard_queue_consumer( app = app or FastAPI() - queue_manager = QueueManager(settings) + tenant_manager = TenantQueueManager(settings) + service_manager = ServiceQueueManager(settings) if settings.metrics.prometheus.enabled: logger.info("Prometheus metrics enabled.") @@ -48,10 +51,18 @@ def start_standard_queue_consumer( instrument_pika() instrument_app(app) - app = add_health_check_endpoint(app, queue_manager.is_ready) + # app = add_health_check_endpoint(app, queue_manager.is_ready) + app = add_health_check_endpoint(app, service_manager.is_ready) webserver_thread = create_webserver_thread_from_settings(app, settings) webserver_thread.start() # queue_manager.start_consuming(callback) - queue_manager.start_sequential_consume(callback) \ No newline at end of file + # queue_manager.start_sequential_consume(callback) + # p1 = multiprocessing.Process(target=tenant_manager.start_consuming, daemon=True) + # p2 = multiprocessing.Process(target=service_manager.start_sequential_consume, kwargs={"callback":callback}, daemon=True) + thread = Thread(target=tenant_manager.start_consuming, daemon=True) + thread.start() + # p1.start() + # p2.start() + 
service_manager.start_sequential_consume(callback) \ No newline at end of file diff --git a/pyinfra/queue/sequential_tenants.py b/pyinfra/queue/sequential_tenants.py index e0514a2..b1eb70f 100644 --- a/pyinfra/queue/sequential_tenants.py +++ b/pyinfra/queue/sequential_tenants.py @@ -117,6 +117,7 @@ class QueueManager: return logger.info("Establishing connection to RabbitMQ...") + logger.info(self.__class__.__name__) self.connection = pika.BlockingConnection(parameters=self.connection_parameters) logger.debug("Opening channel...") diff --git a/pyinfra/queue/multiple_tenants.py b/pyinfra/queue/threaded_tenants.py similarity index 62% rename from pyinfra/queue/multiple_tenants.py rename to pyinfra/queue/threaded_tenants.py index 156736f..6ec9327 100644 --- a/pyinfra/queue/multiple_tenants.py +++ b/pyinfra/queue/threaded_tenants.py @@ -1,17 +1,18 @@ import atexit -import asyncio import concurrent.futures import pika +import queue import json import logging import signal import sys import requests +import time import pika.exceptions from dynaconf import Dynaconf from typing import Callable, Union +from pika.adapters.blocking_connection import BlockingChannel, BlockingConnection from kn_utils.logging import logger -from pika.adapters.select_connection import SelectConnection from pika.channel import Channel from retry import retry @@ -25,14 +26,14 @@ MessageProcessor = Callable[[dict], dict] class BaseQueueManager: - tenant_ids = [] + tenant_exchange = queue.Queue() def __init__(self, settings: Dynaconf): validate_settings(settings, queue_manager_validators) self.connection_parameters = self.create_connection_parameters(settings) - self.connection = None - self.channel = None + self.connection: Union[BlockingConnection, None] = None + self.channel: Union[BlockingChannel, None] = None self.connection_sleep = settings.rabbitmq.connection_sleep self.queue_expiration_time = settings.rabbitmq.queue_expiration_time self.tenant_exchange_name = 
settings.rabbitmq.tenant_exchange_name @@ -52,22 +53,6 @@ class BaseQueueManager: } return pika.ConnectionParameters(**pika_connection_params) - # @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) - # def establish_connection(self): - # if self.connection and self.connection.is_open: - # logger.debug("Connection to RabbitMQ already established.") - # return - - # logger.info("Establishing connection to RabbitMQ...") - # self.connection = pika.BlockingConnection(parameters=self.connection_parameters) - - # logger.debug("Opening channel...") - # self.channel = self.connection.channel() - # self.channel.basic_qos(prefetch_count=1) - # self.initialize_queues() - - # logger.info("Connection to RabbitMQ established, channel open.") - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) def establish_connection(self): if self.connection and self.connection.is_open: @@ -75,69 +60,15 @@ class BaseQueueManager: return logger.info("Establishing connection to RabbitMQ...") - return SelectConnection(parameters=self.connection_parameters, - on_open_callback=self.on_connection_open, - on_open_error_callback=self.on_connection_open_error, - on_close_callback=self.on_connection_close) - - def close_connection(self): - # self._consuming = False - if self.connection.is_closing or self.connection.is_closed: - logger.info('Connection is closing or already closed') - else: - logger.info('Closing connection') - self.connection.close() - - def on_connection_open(self, unused_connection): - logger.debug("Connection opened") - self.open_channel() + logger.info(self.__class__.__name__) + self.connection = pika.BlockingConnection(parameters=self.connection_parameters) - def open_channel(self): - """Open a new channel with RabbitMQ by issuing the Channel.Open RPC - command. When RabbitMQ responds that the channel is open, the - on_channel_open callback will be invoked by pika. 
- - """ - logger.debug('Creating a new channel') - self.connection.channel(on_open_callback=self.on_channel_open) - - def on_connection_open_error(self, unused_connection, err): - logger.error(f"Connection open failed, reopening in {self.connection_sleep} seconds: {err}") - self.connection.ioloop.call_later(self.connection_sleep, self.establish_connection) - - def on_connection_close(self, unused_connection, reason): - logger.warning(f"Connection closed, reopening in {self.connection_sleep} seconds: {reason}") - self.connection.ioloop.call_later(self.connection_sleep, self.establish_connection) - - def on_channel_open(self, channel): - logger.debug("Channel opened") - self.channel = channel - # self.add_on_channel_close_callback() + logger.debug("Opening channel...") + self.channel = self.connection.channel() self.channel.basic_qos(prefetch_count=1) self.initialize_queues() - - # def add_on_channel_close_callback(self): - # """This method tells pika to call the on_channel_closed method if - # RabbitMQ unexpectedly closes the channel. - - # """ - # logger.debug('Adding channel close callback') - # self.channel.add_on_close_callback(self.on_channel_closed) - - # def on_channel_closed(self, channel, reason): - # """Invoked by pika when RabbitMQ unexpectedly closes the channel. - # Channels are usually closed if you attempt to do something that - # violates the protocol, such as re-declare an exchange or queue with - # different parameters. In this case, we'll close the connection - # to shutdown the object. 
- - # :param pika.channel.Channel: The closed channel - # :param Exception reason: why the channel was closed - - # """ - # logger.warning('Channel %i was closed: %s', channel, reason) - # self.close_connection() + logger.info("Connection to RabbitMQ established, channel open.") def is_ready(self): self.establish_connection() @@ -170,11 +101,6 @@ class TenantQueueManager(BaseQueueManager): self.tenant_created_queue_name = self.get_tenant_created_queue_name(settings) self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) - self.event_handlers = {"tenant_created": [], "tenant_deleted": []} - - self.get_initial_tenant_ids( - tenant_endpoint_url=settings.storage.tenant_server.endpoint - ) def initialize_queues(self): self.channel.exchange_declare(exchange=self.tenant_exchange_name, exchange_type="topic") @@ -206,29 +132,20 @@ class TenantQueueManager(BaseQueueManager): self.channel.queue_bind( exchange=self.tenant_exchange_name, queue=self.tenant_deleted_queue_name, routing_key="tenant.delete" ) + + @retry(exceptions=pika.exceptions.AMQPConnectionError, tries=3, delay=5, jitter=(1, 3), logger=logger) + def start_consuming(self): - self.channel.basic_consume(queue=self.tenant_created_queue_name, on_message_callback=self.on_tenant_created) - self.channel.basic_consume(queue=self.tenant_deleted_queue_name, on_message_callback=self.on_tenant_deleted) - - def start(self): - self.connection = self.establish_connection() - if self.connection is not None: - self.connection.ioloop.start() - - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=requests.exceptions.HTTPError) - def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> list: try: - response = requests.get(tenant_endpoint_url, timeout=10) - response.raise_for_status() # Raise an HTTPError for bad responses - - if response.headers["content-type"].lower() == "application/json": - tenants = 
[tenant["tenantId"] for tenant in response.json()] - else: - logger.warning("Response is not in JSON format.") - except Exception as e: - logger.warning("An unexpected error occurred:", e) - - self.tenant_ids.extend(tenants) + self.establish_connection() + self.channel.basic_consume(queue=self.tenant_created_queue_name, on_message_callback=self.on_tenant_created) + self.channel.basic_consume(queue=self.tenant_deleted_queue_name, on_message_callback=self.on_tenant_deleted) + self.channel.start_consuming() + except Exception: + logger.error("An unexpected error occurred while consuming messages. Consuming will stop.", exc_info=True) + raise + finally: + self.stop_consuming() def get_tenant_created_queue_name(self, settings: Dynaconf): return self.get_queue_name_with_suffix( @@ -259,52 +176,31 @@ class TenantQueueManager(BaseQueueManager): def on_tenant_created(self, ch: Channel, method, properties, body): logger.info("Received tenant created event") message = json.loads(body) - logger.info(f"Tenant Created: {message}") ch.basic_ack(delivery_tag=method.delivery_tag) - # TODO: test callback - tenant_id = body["tenantId"] - self.tenant_ids.append(tenant_id) - self._trigger_event("tenant_created", tenant_id) + tenant_id = message["tenantId"] + self.tenant_exchange.put(("create", tenant_id)) def on_tenant_deleted(self, ch: Channel, method, properties, body): logger.info("Received tenant deleted event") message = json.loads(body) - logger.info(f"Tenant Deleted: {message}") ch.basic_ack(delivery_tag=method.delivery_tag) - - # TODO: test callback - tenant_id = body["tenantId"] - self.tenant_ids.remove(tenant_id) - self._trigger_event("tenant_deleted", tenant_id) - - def _trigger_event(self, event_type, tenant_id): - handler = self.event_handlers.get(event_type) - if handler: - try: - handler(tenant_id) - except Exception as e: - logger.error(f"Error in event handler for {event_type}: {e}", exc_info=True) - - def add_event_handler(self, event_type: str, handler: 
Callable[[str], None]): - if event_type in self.event_handlers: - self.event_handlers[event_type] = handler - else: - logger.warning(f"Unknown event type: {event_type}") + + tenant_id = message["tenantId"] + self.tenant_exchange.put(("delete", tenant_id)) def purge_queues(self): self.establish_connection() try: self.channel.queue_purge(self.tenant_created_queue_name) self.channel.queue_purge(self.tenant_deleted_queue_name) - self.channel.queue_purge(self.tenant_events_dlq_name) logger.info("Queues purged.") except pika.exceptions.ChannelWrongStateError: pass class ServiceQueueManager(BaseQueueManager): - def __init__(self, settings: Dynaconf, tenant_manager: TenantQueueManager): + def __init__(self, settings: Dynaconf): super().__init__(settings) self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name @@ -312,8 +208,9 @@ class ServiceQueueManager(BaseQueueManager): self.service_queue_prefix = settings.rabbitmq.service_request_queue_prefix self.service_dlq_name = settings.rabbitmq.service_dlq_name - tenant_manager.add_event_handler("tenant_created", self.add_tenant_queue) - tenant_manager.add_event_handler("tenant_deleted", self.delete_tenant_queue) + self.tenant_ids = self.get_initial_tenant_ids(tenant_endpoint_url=settings.storage.tenant_server.endpoint) + + self._consuming = False def initialize_queues(self): self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="direct") @@ -331,24 +228,65 @@ class ServiceQueueManager(BaseQueueManager): "x-max-priority": 2, }, ) - self.channel.queue_bind(queue_name, self.service_request_exchange_name) - - def start_consuming(self, message_processor: Callable): - self.connection = self.establish_connection() - for tenant_id in self.tenant_ids: - queue_name = self.service_queue_prefix + "_" + tenant_id - message_callback = self._make_on_message_callback(message_processor=message_processor, tenant_id=tenant_id) - self.channel.basic_consume( - queue=queue_name, - 
on_message_callback=message_callback, + self.channel.queue_bind( + queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id ) - logger.info(f"Starting to consume messages for queue {queue_name}...") - # self.channel.start_consuming() - if self.connection is not None: - self.connection.ioloop.start() - else: - logger.info("Connection is None, cannot start ioloop") + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=requests.exceptions.HTTPError) + def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> list: + try: + response = requests.get(tenant_endpoint_url, timeout=10) + response.raise_for_status() # Raise an HTTPError for bad responses + + if response.headers["content-type"].lower() == "application/json": + tenants = [tenant["tenantId"] for tenant in response.json()] + else: + logger.warning("Response is not in JSON format.") + except Exception as e: + logger.warning("An unexpected error occurred:", e) + + return tenants + + @retry(exceptions=pika.exceptions.AMQPConnectionError, tries=3, delay=5, jitter=(1, 3), logger=logger) + def start_sequential_consume(self, message_processor: Callable): + + self.establish_connection() + self._consuming = True + + try: + while self._consuming: + for tenant_id in self.tenant_ids: + queue_name = self.service_queue_prefix + "_" + tenant_id + method_frame, properties, body = self.channel.basic_get(queue_name) + if method_frame: + on_message_callback = self._make_on_message_callback(message_processor, tenant_id) + on_message_callback(self.channel, method_frame, properties, body) + else: + logger.debug("No message returned") + time.sleep(self.connection_sleep) + + ### Handle tenant events + self.check_tenant_exchange() + + except KeyboardInterrupt: + logger.info("Exiting...") + finally: + self.stop_consuming() + + def check_tenant_exchange(self): + while True: + try: + event, tenant = self.tenant_exchange.get(block=False) + if event == "create": + self.on_tenant_created(tenant) + 
elif event == "delete": + self.on_tenant_deleted(tenant) + else: + break + except Exception: + logger.debug("No tenant exchange events.") + break + def publish_message_to_input_queue( self, tenant_id: str, message: Union[str, bytes, dict], properties: pika.BasicProperties = None @@ -377,7 +315,7 @@ class ServiceQueueManager(BaseQueueManager): except pika.exceptions.ChannelWrongStateError: pass - def add_tenant_queue(self, tenant_id: str): + def on_tenant_created(self, tenant_id: str): queue_name = self.service_queue_prefix + "_" + tenant_id self.channel.queue_declare( queue=queue_name, @@ -388,18 +326,16 @@ class ServiceQueueManager(BaseQueueManager): "x-expires": self.queue_expiration_time, # TODO: check if necessary }, ) - self.channel.queue_bind(queue=queue_name, exchange=self.service_request_exchange_name) - # TODO: this is likely not possible due to blocking connection - message_callback = self._make_on_message_callback(message_processor=MessageProcessor, tenant_id=tenant_id) - self.channel.basic_consume( - queue=queue_name, - on_message_callback=message_callback, - ) + self.channel.queue_bind(queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) + self.tenant_ids.append(tenant_id) + logger.debug(f"Added tenant {tenant_id}.") - def delete_tenant_queue(self, tenant_id: str): + def on_tenant_deleted(self, tenant_id: str): queue_name = self.service_queue_prefix + "_" + tenant_id - self.channel.queue_unbind(queue_name, self.service_request_exchange_name) + self.channel.queue_unbind(queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) self.channel.queue_delete(queue_name) + self.tenant_ids.remove(tenant_id) + logger.debug(f"Deleted tenant {tenant_id}.") def _make_on_message_callback(self, message_processor: MessageProcessor, tenant_id: str): def process_message_body_and_await_result(unpacked_message_body): @@ -409,16 +345,7 @@ class ServiceQueueManager(BaseQueueManager): logger.info("Processing 
payload in separate thread.") future = thread_pool_executor.submit(message_processor, unpacked_message_body) - # TODO: This block is probably not necessary, but kept since the implications of removing it are - # unclear. Remove it in a future iteration where less changes are being made to the code base. - # while future.running(): - # logger.debug("Waiting for payload processing to finish...") - # self.connection.sleep(self.connection_sleep) - - loop = asyncio.get_event_loop() - return loop.run_in_executor(None, future.result) - - # return future.result() + return future.result() def on_message_callback(channel, method, properties, body): logger.info(f"Received message from queue with delivery_tag {method.delivery_tag}.") @@ -429,7 +356,7 @@ class ServiceQueueManager(BaseQueueManager): return if body.decode("utf-8") == "STOP": - logger.info("Received stop signal, stopping consuming...") + logger.info(f"Received stop signal, stopping consuming...") channel.basic_ack(delivery_tag=method.delivery_tag) self.stop_consuming() return @@ -446,7 +373,7 @@ class ServiceQueueManager(BaseQueueManager): ) channel.basic_publish( - exchange=self.service_request_exchange_name, + exchange=self.service_response_exchange_name, routing_key=tenant_id, body=json.dumps(result).encode(), properties=pika.BasicProperties(headers=filtered_message_headers), @@ -463,4 +390,16 @@ class ServiceQueueManager(BaseQueueManager): channel.basic_nack(method.delivery_tag, requeue=False) raise - return on_message_callback \ No newline at end of file + return on_message_callback + + def stop_consuming(self): + self._consuming = False + if self.channel and self.channel.is_open: + logger.info("Stopping consuming...") + self.channel.stop_consuming() + logger.info("Closing channel...") + self.channel.close() + + if self.connection and self.connection.is_open: + logger.info("Closing connection to RabbitMQ...") + self.connection.close() \ No newline at end of file From de41030e693abe7e400082531ed04736b8b6424a 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Fri, 5 Jul 2024 13:27:16 +0200 Subject: [PATCH 07/35] feat: wip for multiple tenants --- pyinfra/examples.py | 25 ++- pyinfra/queue/threaded_tenants.py | 260 ++++++++++++++++++------------ pyinfra/storage/utils.py | 30 +++- scripts/send_request.py | 69 ++++++-- tests/docker-compose.yml | 34 ++-- 5 files changed, 275 insertions(+), 143 deletions(-) diff --git a/pyinfra/examples.py b/pyinfra/examples.py index 8856c5b..9f8a227 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -1,12 +1,9 @@ from dynaconf import Dynaconf from fastapi import FastAPI from kn_utils.logging import logger -import multiprocessing from threading import Thread from pyinfra.config.loader import get_pyinfra_validators, validate_settings from pyinfra.queue.callback import Callback -# from pyinfra.queue.manager import QueueManager -from pyinfra.queue.sequential_tenants import QueueManager from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager from pyinfra.utils.opentelemetry import instrument_pika, setup_trace, instrument_app from pyinfra.webserver.prometheus import ( @@ -47,22 +44,20 @@ def start_standard_queue_consumer( if settings.tracing.enabled: setup_trace(settings) - + instrument_pika() instrument_app(app) - - # app = add_health_check_endpoint(app, queue_manager.is_ready) + app = add_health_check_endpoint(app, service_manager.is_ready) webserver_thread = create_webserver_thread_from_settings(app, settings) webserver_thread.start() - # queue_manager.start_consuming(callback) - # queue_manager.start_sequential_consume(callback) - # p1 = multiprocessing.Process(target=tenant_manager.start_consuming, daemon=True) - # p2 = multiprocessing.Process(target=service_manager.start_sequential_consume, kwargs={"callback":callback}, daemon=True) - thread = Thread(target=tenant_manager.start_consuming, daemon=True) - thread.start() - # p1.start() - # p2.start() - 
service_manager.start_sequential_consume(callback) \ No newline at end of file + thread_t = Thread(target=tenant_manager.start_consuming, daemon=True) + thread_s = Thread(target=service_manager.start_sequential_basic_get, args=(callback,), daemon=True) + + thread_t.start() + thread_s.start() + + thread_t.join() + thread_s.join() diff --git a/pyinfra/queue/threaded_tenants.py b/pyinfra/queue/threaded_tenants.py index 6ec9327..24fe9c3 100644 --- a/pyinfra/queue/threaded_tenants.py +++ b/pyinfra/queue/threaded_tenants.py @@ -8,6 +8,7 @@ import signal import sys import requests import time +import threading import pika.exceptions from dynaconf import Dynaconf from typing import Callable, Union @@ -19,6 +20,7 @@ from retry import retry from pyinfra.config.loader import validate_settings from pyinfra.config.validators import queue_manager_validators +logger.set_level("DEBUG") pika_logger = logging.getLogger("pika") pika_logger.setLevel(logging.WARNING) # disables non-informative pika log clutter @@ -26,7 +28,10 @@ MessageProcessor = Callable[[dict], dict] class BaseQueueManager: - tenant_exchange = queue.Queue() + tenant_exchange_queue = queue.Queue() + _connection = None + _lock = threading.Lock() + should_stop = threading.Event() def __init__(self, settings: Dynaconf): validate_settings(settings, queue_manager_validators) @@ -43,7 +48,7 @@ class BaseQueueManager: signal.signal(signal.SIGINT, self._handle_stop_signal) @staticmethod - def create_connection_parameters(settings: Dynaconf): + def create_connection_parameters(settings: Dynaconf) -> pika.ConnectionParameters: credentials = pika.PlainCredentials(username=settings.rabbitmq.username, password=settings.rabbitmq.password) pika_connection_params = { "host": settings.rabbitmq.host, @@ -53,42 +58,46 @@ class BaseQueueManager: } return pika.ConnectionParameters(**pika_connection_params) + def get_connection(self) -> BlockingConnection: + with self._lock: + if not self._connection or self._connection.is_closed: + 
self._connection = pika.BlockingConnection(self.connection_parameters) + return self._connection + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) - def establish_connection(self): - if self.connection and self.connection.is_open: - logger.debug("Connection to RabbitMQ already established.") - return + def establish_connection(self) -> None: + logger.info(f"Establishing connection to RabbitMQ for {self.__class__.__name__}...") + self.connection = self.get_connection() + if not self.channel or self.channel.is_closed: + logger.debug("Opening channel...") + self.channel = self.connection.channel() + self.channel.basic_qos(prefetch_count=1) + self.initialize_queues() + logger.info(f"Connection to RabbitMQ established for {self.__class__.__name__}, channel open.") - logger.info("Establishing connection to RabbitMQ...") - logger.info(self.__class__.__name__) - self.connection = pika.BlockingConnection(parameters=self.connection_parameters) - - logger.debug("Opening channel...") - self.channel = self.connection.channel() - self.channel.basic_qos(prefetch_count=1) - self.initialize_queues() - - logger.info("Connection to RabbitMQ established, channel open.") - - def is_ready(self): + def is_ready(self) -> bool: self.establish_connection() return self.channel.is_open - def initialize_queues(self): + def initialize_queues(self) -> None: raise NotImplementedError("Subclasses should implement this method") - def stop_consuming(self): - if self.channel and self.channel.is_open: - logger.info("Stopping consuming...") - self.channel.stop_consuming() - logger.info("Closing channel...") - self.channel.close() + def stop_consuming(self) -> None: + if not self.should_stop.is_set(): + self.should_stop.set() + if self.channel and self.channel.is_open: + try: + self.channel.stop_consuming() + self.channel.close() + except Exception as e: + logger.error(f"Error stopping consuming: {e}", exc_info=True) + if self.connection and self.connection.is_open: + try: + 
self.connection.close() + except Exception as e: + logger.error(f"Error closing connection: {e}", exc_info=True) - if self.connection and self.connection.is_open: - logger.info("Closing connection to RabbitMQ...") - self.connection.close() - - def _handle_stop_signal(self, signum, *args, **kwargs): + def _handle_stop_signal(self, signum, *args, **kwargs) -> None: logger.info(f"Received signal {signum}, stopping consuming...") self.stop_consuming() sys.exit(0) @@ -102,7 +111,7 @@ class TenantQueueManager(BaseQueueManager): self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) - def initialize_queues(self): + def initialize_queues(self) -> None: self.channel.exchange_declare(exchange=self.tenant_exchange_name, exchange_type="topic") self.channel.queue_declare( @@ -134,7 +143,7 @@ class TenantQueueManager(BaseQueueManager): ) @retry(exceptions=pika.exceptions.AMQPConnectionError, tries=3, delay=5, jitter=(1, 3), logger=logger) - def start_consuming(self): + def start_consuming(self) -> None: try: self.establish_connection() @@ -147,49 +156,49 @@ class TenantQueueManager(BaseQueueManager): finally: self.stop_consuming() - def get_tenant_created_queue_name(self, settings: Dynaconf): + def get_tenant_created_queue_name(self, settings: Dynaconf) -> str: return self.get_queue_name_with_suffix( suffix=settings.rabbitmq.tenant_created_event_queue_suffix, pod_name=settings.kubernetes.pod_name ) - def get_tenant_deleted_queue_name(self, settings: Dynaconf): + def get_tenant_deleted_queue_name(self, settings: Dynaconf) -> str: return self.get_queue_name_with_suffix( suffix=settings.rabbitmq.tenant_deleted_event_queue_suffix, pod_name=settings.kubernetes.pod_name ) - def get_tenant_events_dlq_name(self, settings: Dynaconf): + def get_tenant_events_dlq_name(self, settings: Dynaconf) -> str: return self.get_queue_name_with_suffix( suffix=settings.rabbitmq.tenant_event_dlq_suffix, 
pod_name=settings.kubernetes.pod_name ) - def get_queue_name_with_suffix(self, suffix: str, pod_name: str): + def get_queue_name_with_suffix(self, suffix: str, pod_name: str) -> str: if not self.use_default_queue_name() and pod_name: return f"{pod_name}{suffix}" return self.get_default_queue_name() - def use_default_queue_name(self): + def use_default_queue_name(self) -> bool: return False def get_default_queue_name(self): raise NotImplementedError("Queue name method not implemented") - def on_tenant_created(self, ch: Channel, method, properties, body): + def on_tenant_created(self, ch: Channel, method, properties, body) -> None: logger.info("Received tenant created event") message = json.loads(body) ch.basic_ack(delivery_tag=method.delivery_tag) tenant_id = message["tenantId"] - self.tenant_exchange.put(("create", tenant_id)) + self.tenant_exchange_queue.put(("create", tenant_id)) - def on_tenant_deleted(self, ch: Channel, method, properties, body): + def on_tenant_deleted(self, ch: Channel, method, properties, body) -> None: logger.info("Received tenant deleted event") message = json.loads(body) ch.basic_ack(delivery_tag=method.delivery_tag) tenant_id = message["tenantId"] - self.tenant_exchange.put(("delete", tenant_id)) + self.tenant_exchange_queue.put(("delete", tenant_id)) - def purge_queues(self): + def purge_queues(self) -> None: self.establish_connection() try: self.channel.queue_purge(self.tenant_created_queue_name) @@ -198,6 +207,40 @@ class TenantQueueManager(BaseQueueManager): except pika.exceptions.ChannelWrongStateError: pass + def publish_message_to_tenant_created_queue( + self, message: Union[str, bytes, dict], properties: pika.BasicProperties = None + ) -> None: + if isinstance(message, str): + message = message.encode("utf-8") + elif isinstance(message, dict): + message = json.dumps(message).encode("utf-8") + + self.establish_connection() + self.channel.basic_publish( + exchange=self.tenant_exchange_name, + routing_key="tenant.created", + 
properties=properties, + body=message, + ) + logger.info(f"Published message to queue {self.tenant_created_queue_name}.") + + def publish_message_to_tenant_deleted_queue( + self, message: Union[str, bytes, dict], properties: pika.BasicProperties = None + ) -> None: + if isinstance(message, str): + message = message.encode("utf-8") + elif isinstance(message, dict): + message = json.dumps(message).encode("utf-8") + + self.establish_connection() + self.channel.basic_publish( + exchange=self.tenant_exchange_name, + routing_key="tenant.delete", + properties=properties, + body=message, + ) + logger.info(f"Published message to queue {self.tenant_deleted_queue_name}.") + class ServiceQueueManager(BaseQueueManager): def __init__(self, settings: Dynaconf): @@ -205,21 +248,22 @@ class ServiceQueueManager(BaseQueueManager): self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name self.service_response_exchange_name = settings.rabbitmq.service_response_exchange_name - self.service_queue_prefix = settings.rabbitmq.service_request_queue_prefix + + self.service_request_queue_prefix = settings.rabbitmq.service_request_queue_prefix + self.service_response_queue_prefix = settings.rabbitmq.service_response_queue_prefix + self.service_dlq_name = settings.rabbitmq.service_dlq_name self.tenant_ids = self.get_initial_tenant_ids(tenant_endpoint_url=settings.storage.tenant_server.endpoint) - self._consuming = False - - def initialize_queues(self): + def initialize_queues(self) -> None: self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="direct") self.channel.exchange_declare(exchange=self.service_response_exchange_name, exchange_type="direct") for tenant_id in self.tenant_ids: - queue_name = self.service_queue_prefix + "_" + tenant_id + response_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" self.channel.queue_declare( - queue=queue_name, + queue=response_queue_name, durable=True, arguments={ 
"x-dead-letter-exchange": "", @@ -229,68 +273,74 @@ class ServiceQueueManager(BaseQueueManager): }, ) self.channel.queue_bind( - queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id + queue=response_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id ) - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=requests.exceptions.HTTPError) + response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" + self.channel.queue_declare( + queue=response_queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dlq_name, + "x-expires": self.queue_expiration_time, # TODO: check if necessary + }, + ) + self.channel.queue_bind(queue=response_queue_name, exchange=self.service_response_exchange_name, routing_key=tenant_id) + + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=(requests.exceptions.HTTPError, requests.exceptions.ConnectionError)) def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> list: - try: - response = requests.get(tenant_endpoint_url, timeout=10) - response.raise_for_status() # Raise an HTTPError for bad responses + response = requests.get(tenant_endpoint_url, timeout=10) + response.raise_for_status() # Raise an HTTPError for bad responses - if response.headers["content-type"].lower() == "application/json": - tenants = [tenant["tenantId"] for tenant in response.json()] - else: - logger.warning("Response is not in JSON format.") - except Exception as e: - logger.warning("An unexpected error occurred:", e) - - return tenants + if response.headers["content-type"].lower() == "application/json": + tenants = [tenant["tenantId"] for tenant in response.json()] + return tenants + return [] @retry(exceptions=pika.exceptions.AMQPConnectionError, tries=3, delay=5, jitter=(1, 3), logger=logger) - def start_sequential_consume(self, message_processor: Callable): + def start_sequential_basic_get(self, 
message_processor: Callable) -> None: self.establish_connection() - self._consuming = True - try: - while self._consuming: + while not self.should_stop.is_set(): for tenant_id in self.tenant_ids: - queue_name = self.service_queue_prefix + "_" + tenant_id + queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" method_frame, properties, body = self.channel.basic_get(queue_name) if method_frame: + logger.debug("PROCESSING MESSAGE") on_message_callback = self._make_on_message_callback(message_processor, tenant_id) on_message_callback(self.channel, method_frame, properties, body) else: - logger.debug("No message returned") - time.sleep(self.connection_sleep) + logger.debug(f"No message returned for queue {queue_name}") + # time.sleep(self.connection_sleep) + time.sleep(0.1) ### Handle tenant events self.check_tenant_exchange() + except KeyboardInterrupt: logger.info("Exiting...") finally: self.stop_consuming() - def check_tenant_exchange(self): - while True: + def check_tenant_exchange(self) -> None: + while not self.tenant_exchange_queue.empty(): try: - event, tenant = self.tenant_exchange.get(block=False) + event, tenant = self.tenant_exchange_queue.get_nowait() if event == "create": self.on_tenant_created(tenant) elif event == "delete": self.on_tenant_deleted(tenant) - else: - break - except Exception: - logger.debug("No tenant exchange events.") + except queue.Empty: + # time.sleep(self.connection_sleep) break def publish_message_to_input_queue( self, tenant_id: str, message: Union[str, bytes, dict], properties: pika.BasicProperties = None - ): + ) -> None: if isinstance(message, str): message = message.encode("utf-8") elif isinstance(message, dict): @@ -305,20 +355,22 @@ class ServiceQueueManager(BaseQueueManager): ) logger.info(f"Published message to queue {tenant_id}.") - def purge_queues(self): + def purge_queues(self) -> None: self.establish_connection() try: for tenant_id in self.tenant_ids: - queue_name = self.service_queue_prefix + "_" + tenant_id 
- self.channel.queue_purge(queue_name) + request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" + response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" + self.channel.queue_purge(request_queue_name) + self.channel.queue_purge(response_queue_name) logger.info("Queues purged.") except pika.exceptions.ChannelWrongStateError: pass - def on_tenant_created(self, tenant_id: str): - queue_name = self.service_queue_prefix + "_" + tenant_id + def on_tenant_created(self, tenant_id: str) -> None: + request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" self.channel.queue_declare( - queue=queue_name, + queue=request_queue_name, durable=True, arguments={ "x-dead-letter-exchange": "", @@ -326,18 +378,36 @@ class ServiceQueueManager(BaseQueueManager): "x-expires": self.queue_expiration_time, # TODO: check if necessary }, ) - self.channel.queue_bind(queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) + self.channel.queue_bind(queue=request_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) + + response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" + self.channel.queue_declare( + queue=response_queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dlq_name, + "x-expires": self.queue_expiration_time, # TODO: check if necessary + }, + ) + self.channel.queue_bind(queue=response_queue_name, exchange=self.service_response_exchange_name, routing_key=tenant_id) + self.tenant_ids.append(tenant_id) logger.debug(f"Added tenant {tenant_id}.") - def on_tenant_deleted(self, tenant_id: str): - queue_name = self.service_queue_prefix + "_" + tenant_id - self.channel.queue_unbind(queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) - self.channel.queue_delete(queue_name) + def on_tenant_deleted(self, tenant_id: str) -> None: + request_queue_name = 
f"{self.service_request_queue_prefix}_{tenant_id}" + self.channel.queue_unbind(queue=request_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) + self.channel.queue_delete(request_queue_name) + + response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" + self.channel.queue_unbind(queue=response_queue_name, exchange=self.service_response_exchange_name, routing_key=tenant_id) + self.channel.queue_delete(response_queue_name) + self.tenant_ids.remove(tenant_id) logger.debug(f"Deleted tenant {tenant_id}.") - def _make_on_message_callback(self, message_processor: MessageProcessor, tenant_id: str): + def _make_on_message_callback(self, message_processor: MessageProcessor, tenant_id: str) -> Callable: def process_message_body_and_await_result(unpacked_message_body): # Processing the message in a separate thread is necessary for the main thread pika client to be able to # process data events (e.g. heartbeats) while the message is being processed. @@ -390,16 +460,4 @@ class ServiceQueueManager(BaseQueueManager): channel.basic_nack(method.delivery_tag, requeue=False) raise - return on_message_callback - - def stop_consuming(self): - self._consuming = False - if self.channel and self.channel.is_open: - logger.info("Stopping consuming...") - self.channel.stop_consuming() - logger.info("Closing channel...") - self.channel.close() - - if self.connection and self.connection.is_open: - logger.info("Closing connection to RabbitMQ...") - self.connection.close() \ No newline at end of file + return on_message_callback \ No newline at end of file diff --git a/pyinfra/storage/utils.py b/pyinfra/storage/utils.py index 36b5b81..3e14a2d 100644 --- a/pyinfra/storage/utils.py +++ b/pyinfra/storage/utils.py @@ -19,6 +19,17 @@ class DossierIdFileIdDownloadPayload(BaseModel): return f"{self.dossierId}/{self.fileId}.{self.targetFileExtension}" +class TenantIdDossierIdFileIdDownloadPayload(BaseModel): + tenantId: str + dossierId: str + fileId: str + 
targetFileExtension: str + + @property + def targetFilePath(self): + return f"{self.tenantId}/{self.dossierId}/{self.fileId}.{self.targetFileExtension}" + + class DossierIdFileIdUploadPayload(BaseModel): dossierId: str fileId: str @@ -27,6 +38,17 @@ class DossierIdFileIdUploadPayload(BaseModel): @property def responseFilePath(self): return f"{self.dossierId}/{self.fileId}.{self.responseFileExtension}" + + +class TenantIdDossierIdFileIdUploadPayload(BaseModel): + tenantId: str + dossierId: str + fileId: str + responseFileExtension: str + + @property + def responseFilePath(self): + return f"{self.tenantId}/{self.dossierId}/{self.fileId}.{self.responseFileExtension}" class TargetResponseFilePathDownloadPayload(BaseModel): @@ -55,7 +77,9 @@ def download_data_as_specified_in_message(storage: Storage, raw_payload: dict) - """ try: - if "dossierId" in raw_payload: + if "tenantId" in raw_payload and "dossierId" in raw_payload: + payload = TenantIdDossierIdFileIdDownloadPayload(**raw_payload) + elif "tenantId" not in raw_payload and "dossierId" in raw_payload: payload = DossierIdFileIdDownloadPayload(**raw_payload) else: payload = TargetResponseFilePathDownloadPayload(**raw_payload) @@ -106,7 +130,9 @@ def upload_data_as_specified_in_message(storage: Storage, raw_payload: dict, dat """ try: - if "dossierId" in raw_payload: + if "tenantId" in raw_payload and "dossierId" in raw_payload: + payload = TenantIdDossierIdFileIdUploadPayload(**raw_payload) + elif "tenantId" not in raw_payload and "dossierId" in raw_payload: payload = DossierIdFileIdUploadPayload(**raw_payload) else: payload = TargetResponseFilePathUploadPayload(**raw_payload) diff --git a/scripts/send_request.py b/scripts/send_request.py index c7d2046..76b640d 100644 --- a/scripts/send_request.py +++ b/scripts/send_request.py @@ -1,25 +1,27 @@ import gzip import json +import time from operator import itemgetter from kn_utils.logging import logger from pyinfra.config.loader import load_settings, 
local_pyinfra_root_path # from pyinfra.queue.manager import QueueManager -from pyinfra.queue.sequential_tenants import QueueManager +# from pyinfra.queue.sequential_tenants import QueueManager +from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager from pyinfra.storage.storages.s3 import get_s3_storage_from_settings settings = load_settings(local_pyinfra_root_path / "config/") -def upload_json_and_make_message_body(): +def upload_json_and_make_message_body(tenant_id: str): dossier_id, file_id, suffix = "dossier", "file", "json.gz" content = { "numberOfPages": 7, "sectionTexts": "data", } - object_name = f"{dossier_id}/{file_id}.{suffix}" + object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" data = gzip.compress(json.dumps(content).encode("utf-8")) storage = get_s3_storage_from_settings(settings) @@ -28,6 +30,7 @@ def upload_json_and_make_message_body(): storage.put_object(object_name, data) message_body = { + "tenantId": tenant_id, "dossierId": dossier_id, "fileId": file_id, "targetFileExtension": suffix, @@ -36,18 +39,38 @@ def upload_json_and_make_message_body(): return message_body -def main(): - queue_manager = QueueManager(settings) +def tenant_event_message(tenant_id: str): + return {"tenantId": tenant_id} + + +def send_tenant_event(tenant_id: str, event_type: str): + queue_manager = TenantQueueManager(settings) + queue_manager.purge_queues() + message = tenant_event_message(tenant_id) + if event_type == "create": + queue_manager.publish_message_to_tenant_created_queue(message=message) + elif event_type == "delete": + queue_manager.publish_message_to_tenant_deleted_queue(message=message) + else: + logger.warning(f"Event type '{event_type}' not known.") + queue_manager.stop_consuming() + + +def send_service_request(tenant_id: str): + queue_manager = ServiceQueueManager(settings) + queue_name = f"{settings.rabbitmq.service_response_queue_prefix}_{tenant_id}" + queue_manager.purge_queues() - message = 
upload_json_and_make_message_body() + message = upload_json_and_make_message_body(tenant_id) - queue_manager.publish_message_to_input_queue(tenant_id="redaction", message=message) - logger.info(f"Put {message} on {settings.rabbitmq.input_queue}.") + queue_manager.publish_message_to_input_queue(tenant_id=tenant_id, message=message) + logger.info(f"Put {message} on {queue_name}.") storage = get_s3_storage_from_settings(settings) + for method_frame, properties, body in queue_manager.channel.consume( - queue=settings.rabbitmq.output_queue, inactivity_timeout=15 + queue=queue_name, inactivity_timeout=15 ): if not body: break @@ -55,14 +78,34 @@ def main(): logger.info(f"Received {response}") logger.info(f"Message headers: {properties.headers}") queue_manager.channel.basic_ack(method_frame.delivery_tag) - dossier_id, file_id = itemgetter("dossierId", "fileId")(response) + tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(response) suffix = message["responseFileExtension"] - print(f"{dossier_id}/{file_id}.{suffix}") - result = storage.get_object(f"{dossier_id}/{file_id}.{suffix}") + print(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") + result = storage.get_object(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") result = json.loads(gzip.decompress(result)) logger.info(f"Contents of result on storage: {result}") + break queue_manager.stop_consuming() if __name__ == "__main__": - main() + tenant_ids = ["a", "b", "c", "d"] + # with ccf.ThreadPoolExecutor() as executor: + # results = executor.map(main, tenant_ids) + # for tenant in tenant_ids: + # main(tenant) + + send_service_request("redaction") + + # for tenant in tenant_ids: + # send_tenant_event(tenant_id=tenant, event_type="create") + + # # time.sleep(1) + + # for tenant in tenant_ids: + # send_service_request(tenant_id=tenant) + + # # time.sleep(1) + + # for tenant in tenant_ids: + # send_tenant_event(tenant_id=tenant, event_type="delete") \ No newline at end of file diff --git 
a/tests/docker-compose.yml b/tests/docker-compose.yml index c53537c..108a437 100644 --- a/tests/docker-compose.yml +++ b/tests/docker-compose.yml @@ -1,31 +1,41 @@ -version: '2' +version: '3.8' services: minio: - image: minio/minio:RELEASE.2022-06-11T19-55-32Z + image: minio/minio:latest + container_name: minio ports: - "9000:9000" environment: - MINIO_ROOT_PASSWORD=password - MINIO_ROOT_USER=root volumes: - - /tmp/minio_store:/data + - /tmp/data/minio_store:/data command: server /data - network_mode: "bridge" + network_mode: "bridge" + extra_hosts: + - "host.docker.internal:host-gateway" rabbitmq: - image: docker.io/bitnami/rabbitmq:3.9.8 + image: docker.io/bitnami/rabbitmq:latest + container_name: rabbitmq ports: - - '4369:4369' - - '5551:5551' - - '5552:5552' + # - '4369:4369' + # - '5551:5551' + # - '5552:5552' - '5672:5672' - - '25672:25672' - '15672:15672' + # - '25672:25672' environment: - RABBITMQ_SECURE_PASSWORD=yes - RABBITMQ_VM_MEMORY_HIGH_WATERMARK=100% - RABBITMQ_DISK_FREE_ABSOLUTE_LIMIT=20Gi + - RABBITMQ_MANAGEMENT_ALLOW_WEB_ACCESS=true network_mode: "bridge" volumes: - - /opt/bitnami/rabbitmq/.rabbitmq/:/data/bitnami -volumes: - mdata: \ No newline at end of file + - /tmp/bitnami/rabbitmq/.rabbitmq/:/data/bitnami + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:15672" ] + interval: 30s + timeout: 10s + retries: 5 + extra_hosts: + - "host.docker.internal:host-gateway" From b2e3ae092fbf58ab2038b8250c75a71ab7ddfd11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Tue, 9 Jul 2024 18:20:55 +0200 Subject: [PATCH 08/35] feat: wip for multiple tenants --- poetry.lock | 249 ++++++++++++++++++- pyinfra/examples.py | 26 +- pyinfra/queue/async_tenants.py | 395 ++++++++++++++++++++++++++++++ pyinfra/queue/threaded_tenants.py | 6 +- pyproject.toml | 1 + scripts/send_request.py | 93 ++++--- scripts/send_threaded_request.py | 99 ++++++++ 7 files changed, 807 insertions(+), 62 deletions(-) create mode 100644 
pyinfra/queue/async_tenants.py create mode 100644 scripts/send_threaded_request.py diff --git a/poetry.lock b/poetry.lock index 8b9f874..5f75804 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,5 +1,35 @@ # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +[[package]] +name = "aio-pika" +version = "9.4.2" +description = "Wrapper around the aiormq for asyncio and humans" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "aio_pika-9.4.2-py3-none-any.whl", hash = "sha256:22e5fa27d10a3817dd24c031cc477953aaf7c3be5f4f25d2582a55ec229adc4c"}, + {file = "aio_pika-9.4.2.tar.gz", hash = "sha256:d1217dc28d09be9dff96c06cdf2e82c92599a34f154e8932bf35373157f3424d"}, +] + +[package.dependencies] +aiormq = ">=6.8.0,<6.9.0" +yarl = "*" + +[[package]] +name = "aiormq" +version = "6.8.0" +description = "Pure python AMQP asynchronous client library" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "aiormq-6.8.0-py3-none-any.whl", hash = "sha256:9a16174dcae4078c957a773d2f02d3dfd6c2fcf12c909dc244333a458f2aeab0"}, + {file = "aiormq-6.8.0.tar.gz", hash = "sha256:198f9c7430feb7bc491016099a06266dc45880b6b1de3925d410fde6541a66fb"}, +] + +[package.dependencies] +pamqp = "3.3.0" +yarl = "*" + [[package]] name = "annotated-types" version = "0.7.0" @@ -1604,6 +1634,105 @@ requests-oauthlib = ">=0.5.0" [package.extras] async = ["aiodns", "aiohttp (>=3.0)"] +[[package]] +name = "multidict" +version = "6.0.5" +description = "multidict implementation" +optional = false +python-versions = ">=3.7" +files = [ + {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"}, + {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"}, + {file = "multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc"}, + {file = "multidict-6.0.5-cp310-cp310-win32.whl", hash = "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319"}, + {file = "multidict-6.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8"}, + {file = 
"multidict-6.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba"}, + {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e"}, + {file = "multidict-6.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e"}, + {file = "multidict-6.0.5-cp311-cp311-win32.whl", hash = "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c"}, + {file = "multidict-6.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea"}, + {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e"}, + {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b"}, + {file = "multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef"}, + {file = 
"multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda"}, + {file = "multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5"}, + {file = "multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556"}, + {file = "multidict-6.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = 
"sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc"}, + {file = "multidict-6.0.5-cp37-cp37m-win32.whl", hash = "sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee"}, + {file = "multidict-6.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50"}, + {file = 
"multidict-6.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44"}, + {file = "multidict-6.0.5-cp38-cp38-win32.whl", hash = "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241"}, + {file = "multidict-6.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e"}, + {file = 
"multidict-6.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c"}, + {file = "multidict-6.0.5-cp39-cp39-win32.whl", hash = "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b"}, + {file = "multidict-6.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755"}, + {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"}, + {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"}, +] + [[package]] name = "mypy-extensions" version = "1.0.0" @@ -2068,6 +2197,21 @@ files = [ {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] +[[package]] 
+name = "pamqp" +version = "3.3.0" +description = "RabbitMQ Focused AMQP low-level library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pamqp-3.3.0-py2.py3-none-any.whl", hash = "sha256:c901a684794157ae39b52cbf700db8c9aae7a470f13528b9d7b4e5f7202f8eb0"}, + {file = "pamqp-3.3.0.tar.gz", hash = "sha256:40b8795bd4efcf2b0f8821c1de83d12ca16d5760f4507836267fd7a02b06763b"}, +] + +[package.extras] +codegen = ["lxml", "requests", "yapf"] +testing = ["coverage", "flake8", "flake8-comprehensions", "flake8-deprecated", "flake8-import-order", "flake8-print", "flake8-quotes", "flake8-rst-docstrings", "flake8-tuple", "yapf"] + [[package]] name = "parso" version = "0.8.4" @@ -3320,6 +3464,109 @@ files = [ {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, ] +[[package]] +name = "yarl" +version = "1.9.4" +description = "Yet another URL library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, + {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, + {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, + {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, + {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, + {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, + {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, + {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, + {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, + {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, + {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = 
"sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, + {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = 
"sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, + {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, + {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, + {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, + {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + [[package]] name = "zipp" version = "3.19.2" @@ -3338,4 +3585,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.11" -content-hash = "62515e90532a3a74d73bc0d386d226abd56ba3de84ce80e853b127bf4d8dc512" +content-hash = "f5092a8dc5540c2085559368c90f5a69efe0b1eba468f5545f29194a305b004d" diff --git a/pyinfra/examples.py b/pyinfra/examples.py index 9f8a227..4440bac 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -1,3 +1,5 @@ +import asyncio + from dynaconf import Dynaconf from fastapi import FastAPI from kn_utils.logging import logger @@ -5,6 +7,7 @@ from threading import Thread from pyinfra.config.loader import get_pyinfra_validators, validate_settings from pyinfra.queue.callback import Callback from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager +from pyinfra.queue.async_tenants import AsyncQueueManager from pyinfra.utils.opentelemetry import instrument_pika, setup_trace, instrument_app from pyinfra.webserver.prometheus import ( add_prometheus_endpoint, @@ -34,8 +37,8 @@ def start_standard_queue_consumer( app = app or FastAPI() - tenant_manager = 
TenantQueueManager(settings) - service_manager = ServiceQueueManager(settings) + # tenant_manager = TenantQueueManager(settings) + # service_manager = ServiceQueueManager(settings) if settings.metrics.prometheus.enabled: logger.info("Prometheus metrics enabled.") @@ -48,16 +51,21 @@ def start_standard_queue_consumer( instrument_pika() instrument_app(app) - app = add_health_check_endpoint(app, service_manager.is_ready) + manager = AsyncQueueManager(settings=settings, message_processor=callback) + + # app = add_health_check_endpoint(app, service_manager.is_ready) + app = add_health_check_endpoint(app, manager.is_ready) webserver_thread = create_webserver_thread_from_settings(app, settings) webserver_thread.start() - thread_t = Thread(target=tenant_manager.start_consuming, daemon=True) - thread_s = Thread(target=service_manager.start_sequential_basic_get, args=(callback,), daemon=True) + # thread_t = Thread(target=tenant_manager.start_consuming, daemon=True) + # thread_s = Thread(target=service_manager.start_sequential_basic_get, args=(callback,), daemon=True) - thread_t.start() - thread_s.start() + # thread_t.start() + # thread_s.start() - thread_t.join() - thread_s.join() + # thread_t.join() + # thread_s.join() + + asyncio.run(manager.start_processing()) \ No newline at end of file diff --git a/pyinfra/queue/async_tenants.py b/pyinfra/queue/async_tenants.py new file mode 100644 index 0000000..17e912e --- /dev/null +++ b/pyinfra/queue/async_tenants.py @@ -0,0 +1,395 @@ +import aiormq +import asyncio +import aio_pika +import concurrent.futures +import requests +import json + +from aio_pika import Message, DeliveryMode +from aio_pika.abc import AbstractIncomingMessage +from dynaconf import Dynaconf +from typing import Callable, Union + +from kn_utils.logging import logger + +from pyinfra.config.loader import validate_settings +from pyinfra.config.validators import queue_manager_validators +from pyinfra.config.loader import load_settings, local_pyinfra_root_path +from 
pyinfra.queue.callback import make_download_process_upload_callback + + +MessageProcessor = Callable[[dict], dict] + + +class AsyncQueueManager: + + def __init__(self, settings: Dynaconf, message_processor: Callable = None) -> None: + validate_settings(settings, queue_manager_validators) + + self.message_processor = message_processor + self.connection_params = self.get_connection_params(settings) + self.connection = None + self.channel = None + + self.active_tenants = self.get_initial_tenant_ids(tenant_endpoint_url=settings.storage.tenant_server.endpoint) + self.consumer_tasks = {} + + self.connection_sleep = settings.rabbitmq.connection_sleep + self.queue_expiration_time = settings.rabbitmq.queue_expiration_time + + self.tenant_created_queue_name = self.get_tenant_created_queue_name(settings) + self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) + self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) + self.tenant_exchange_name = settings.rabbitmq.tenant_exchange_name + + self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name + self.service_response_exchange_name = settings.rabbitmq.service_response_exchange_name + + self.service_request_queue_prefix = settings.rabbitmq.service_request_queue_prefix + self.service_response_queue_prefix = settings.rabbitmq.service_response_queue_prefix + + self.service_dlq_name = settings.rabbitmq.service_dlq_name + + + @staticmethod + def get_connection_params(settings: Dynaconf): + return { + + "host": settings.rabbitmq.host, + "port": settings.rabbitmq.port, + "login": settings.rabbitmq.username, + "password":settings.rabbitmq.password, + "client_properties": {"heartbeat": settings.rabbitmq.heartbeat} + } + + def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> set: + response = requests.get(tenant_endpoint_url, timeout=10) + response.raise_for_status() # Raise an HTTPError for bad responses + + if response.headers["content-type"].lower() == 
"application/json": + tenants = {tenant["tenantId"] for tenant in response.json()} + return tenants + return set() + + def get_tenant_created_queue_name(self, settings: Dynaconf) -> str: + return self.get_queue_name_with_suffix( + suffix=settings.rabbitmq.tenant_created_event_queue_suffix, pod_name=settings.kubernetes.pod_name + ) + + def get_tenant_deleted_queue_name(self, settings: Dynaconf) -> str: + return self.get_queue_name_with_suffix( + suffix=settings.rabbitmq.tenant_deleted_event_queue_suffix, pod_name=settings.kubernetes.pod_name + ) + + def get_tenant_events_dlq_name(self, settings: Dynaconf) -> str: + return self.get_queue_name_with_suffix( + suffix=settings.rabbitmq.tenant_event_dlq_suffix, pod_name=settings.kubernetes.pod_name + ) + + def get_queue_name_with_suffix(self, suffix: str, pod_name: str) -> str: + if not self.use_default_queue_name() and pod_name: + return f"{pod_name}{suffix}" + return self.get_default_queue_name() + + def use_default_queue_name(self) -> bool: + return False + + def get_default_queue_name(self): + raise NotImplementedError("Queue name method not implemented") + + async def is_ready(self) -> bool: + await self.connect() + return self.channel.is_open + + #### ASYNC STUFF + async def purge_queues(self) -> None: + await self.establish_connection() + try: + for tenant_id in self.active_tenants: + service_request_queue = await self.channel.get_queue(f"{self.service_request_queue_prefix}_{tenant_id}") + await service_request_queue.purge() + service_response_queue = await self.channel.get_queue(f"{self.service_response_queue_prefix}_{tenant_id}") + await service_response_queue.purge() + logger.info("Queues purged.") + except aio_pika.exceptions.ChannelInvalidStateError: + pass + + async def connect(self): + self.connection = await aio_pika.connect_robust(**self.connection_params) + self.channel = await self.connection.channel() + logger.info("Connection established.") + + async def establish_connection(self): + await 
self.connect() + await self.initialize_queues() + logger.info("Queues initialized.") + # await self.start_processing() + + async def start_processing(self): + await self.establish_connection() + tenant_events = asyncio.create_task(self.handle_tenant_events()) + service_events = asyncio.create_task(self.start_consumers()) + + await asyncio.gather(tenant_events, service_events) + + + async def initialize_queues(self): + await self.channel.set_qos(prefetch_count=1) + + service_request_exchange = await self.channel.declare_exchange(name=self.service_request_exchange_name, type=aio_pika.ExchangeType.DIRECT) + service_response_exchange = await self.channel.declare_exchange(name=self.service_response_exchange_name, type=aio_pika.ExchangeType.DIRECT) + + for tenant_id in self.active_tenants: + request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" + request_queue = await self.channel.declare_queue( + name=request_queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dlq_name, + "x-expires": self.queue_expiration_time, # TODO: check if necessary + "x-max-priority": 2, + }, + ) + await request_queue.bind(exchange=service_request_exchange, routing_key=tenant_id) + + response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" + response_queue = await self.channel.declare_queue( + name=response_queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dlq_name, + "x-expires": self.queue_expiration_time, # TODO: check if necessary + }, + ) + await response_queue.bind(exchange=service_response_exchange, routing_key=tenant_id) + + async def handle_tenant_events(self): + # Declare the topic exchange for tenant events + exchange = await self.channel.declare_exchange( + self.tenant_exchange_name, aio_pika.ExchangeType.TOPIC + ) + # Declare a queue for receiving tenant events + queue = await 
self.channel.declare_queue("tenant_events_queue", arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.tenant_events_dlq_name, + }, + durable=True,) + + await queue.bind(exchange, routing_key="tenant.*") + + async with queue.iterator() as queue_iter: + async for message in queue_iter: + async with message.process(reject_on_redelivered=True): + routing_key = message.routing_key + tenant_id = message.body.decode() + if routing_key == "tenant.created": + # Handle tenant creation + await self.handle_tenant_created(tenant_id) + + elif routing_key == "tenant.deleted": + # Handle tenant deletion + await self.handle_tenant_deleted(tenant_id) + else: + message.nack() + continue + message.ack() + await self.restart_consumers() + + async def handle_tenant_created(self, tenant_id): + # Handle creation of input and output queues for the new tenant + await self.create_tenant_queues(tenant_id) + await self.restart_consumers() + + async def handle_tenant_deleted(self, tenant_id): + # Handle deletion of input and output queues for the tenant + await self.delete_tenant_queues(tenant_id) + await self.restart_consumers() + + async def create_tenant_queues(self, tenant_id): + # Implement queue creation logic for the tenant + queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" + queue = await self.channel.declare_queue( + name=queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dlq_name, + "x-expires": self.queue_expiration_time, # TODO: check if necessary + }, + ) + exchange = await self.channel.get_exchange(self.service_request_exchange_name) + await queue.bind(exchange=exchange, routing_key=tenant_id) + self.active_tenants.add(tenant_id) + + + async def delete_tenant_queues(self, tenant_id): + queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" + queue = await self.channel.get_queue(queue_name) + exchange = await 
self.channel.get_exchange(self.service_request_exchange_name) + await queue.unbind(exchange=exchange, routing_key=tenant_id) + await self.channel.queue_delete(queue_name) + self.active_tenants.discard(tenant_id) + + async def consume_from_request_queue(self, tenant_id): + queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" + queue = await self.channel.get_queue(queue_name) + + async with queue.iterator() as queue_iter: + async for message in queue_iter: + async with message.process(): + on_message_callback = await self._make_on_message_callback(self.message_processor, tenant_id) + await on_message_callback(message) + + async def publish_to_service_response_queue(self, tenant_id, result): + service_response_exchange = await self.channel.get_exchange(self.service_response_exchange_name) + + await service_response_exchange.publish(aio_pika.Message(body=json.dumps(result).encode()), + routing_key=tenant_id,) + + async def restart_consumers(self): + # Stop current consumers and start new ones for active tenants + await self.stop_consumers() + await self.start_consumers() + + async def start_consumers(self): + # Start consuming messages from input queues for active tenants + for tenant_id in self.active_tenants: + if tenant_id not in self.consumer_tasks: + self.consumer_tasks[tenant_id] = asyncio.create_task(self.consume_from_request_queue(tenant_id)) + + consumer_tasks = [self.consume_from_request_queue(tenant) for tenant in self.active_tenants] + await asyncio.gather(*consumer_tasks) + + async def stop_consumers(self): + for task in self.consumer_tasks.values(): + task.cancel() + try: + await task + except asyncio.CancelledError: + pass + self.consumer_tasks.clear() + + async def main_loop(self): + await self.establish_connection() + + async def shutdown(self): + # Implement cleanup logic + await self.stop_consumers() + if self.connection: + await self.connection.close() + + async def _make_on_message_callback(self, message_processor: MessageProcessor, 
tenant_id: str) -> Callable: + async def process_message_body_and_await_result(unpacked_message_body): + # Processing the message in a separate thread is necessary for the main thread pika client to be able to + # process data events (e.g. heartbeats) while the message is being processed. + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor: + logger.info("Processing payload in separate thread.") + future = thread_pool_executor.submit(message_processor, unpacked_message_body) + + return future.result() + + async def on_message_callback(message: AbstractIncomingMessage): + logger.info(f"Received message from queue with delivery_tag {message.delivery_tag}.") + + if message.redelivered: + logger.warning(f"Declining message with {message.delivery_tag=} due to it being redelivered.") + await message.nack(requeue=False) + return + + if message.body.decode("utf-8") == "STOP": + logger.info("Received stop signal, stopping consuming...") + await message.ack() + await self.stop_consumers() + return + + try: + filtered_message_headers = ( + {k: v for k, v in message.properties.headers.items() if k.lower().startswith("x-")} + if message.properties.headers + else {} + ) + logger.debug(f"Processing message with {filtered_message_headers=}.") + result: dict = await ( + process_message_body_and_await_result({**json.loads(message.body), **filtered_message_headers}) or {} + ) + + await self.publish_to_service_response_queue(tenant_id, result) + logger.info(f"Published result to queue {tenant_id}.") + + await message.ack() + logger.debug(f"Message with {message.delivery_tag=} acknowledged.") + except FileNotFoundError as e: + logger.warning(f"{e}, declining message with {message.delivery_tag=}.") + await message.nack(requeue=False) + except Exception as e: + logger.warning(f"Failed to process message with {message.delivery_tag=}, declining...", exc_info=True) + logger.warning(e) + await message.nack(requeue=False) + raise + + return on_message_callback 
+ + async def publish_message_to_input_queue( + self, tenant_id: str, message: Union[str, bytes, dict]) -> None: + if isinstance(message, str): + message = message.encode("utf-8") + elif isinstance(message, dict): + message = json.dumps(message).encode("utf-8") + + await self.establish_connection() + + service_request_exchange = await self.channel.get_exchange(self.service_request_exchange_name) + + await service_request_exchange.publish(message=Message(body=message, delivery_mode=DeliveryMode.NOT_PERSISTENT), routing_key=tenant_id) + + logger.info(f"Published message to queue {tenant_id}.") + + async def publish_message_to_tenant_created_queue( + self, message: Union[str, bytes, dict]) -> None: + if isinstance(message, str): + message = message.encode("utf-8") + elif isinstance(message, dict): + message = json.dumps(message).encode("utf-8") + + await self.establish_connection() + service_request_exchange = await self.channel.get_exchange(self.tenant_exchange_name) + + await service_request_exchange.publish(message=Message(body=message, delivery_mode=DeliveryMode.NOT_PERSISTENT), routing_key="tenant.created") + + logger.info(f"Published message to queue {self.tenant_created_queue_name}.") + + async def publish_message_to_tenant_deleted_queue( + self, message: Union[str, bytes, dict]) -> None: + if isinstance(message, str): + message = message.encode("utf-8") + elif isinstance(message, dict): + message = json.dumps(message).encode("utf-8") + + await self.establish_connection() + service_request_exchange = await self.channel.get_exchange(self.tenant_exchange_name) + + await service_request_exchange.publish(message=Message(body=message, delivery_mode=DeliveryMode.NOT_PERSISTENT), routing_key="tenant.delete") + + logger.info(f"Published message to queue {self.tenant_deleted_queue_name}.") + + + + +async def main() -> None: + import time + settings = load_settings(local_pyinfra_root_path / "config/") + callback = "" + + manager = AsyncQueueManager(settings=settings, 
message_processor=callback) + + await manager.main_loop() + + + while True: + time.sleep(100) + print("keep idling") + +if __name__ == '__main__': + asyncio.run(main()) \ No newline at end of file diff --git a/pyinfra/queue/threaded_tenants.py b/pyinfra/queue/threaded_tenants.py index 24fe9c3..cbf1e25 100644 --- a/pyinfra/queue/threaded_tenants.py +++ b/pyinfra/queue/threaded_tenants.py @@ -261,9 +261,9 @@ class ServiceQueueManager(BaseQueueManager): self.channel.exchange_declare(exchange=self.service_response_exchange_name, exchange_type="direct") for tenant_id in self.tenant_ids: - response_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" + request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" self.channel.queue_declare( - queue=response_queue_name, + queue=request_queue_name, durable=True, arguments={ "x-dead-letter-exchange": "", @@ -273,7 +273,7 @@ class ServiceQueueManager(BaseQueueManager): }, ) self.channel.queue_bind( - queue=response_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id + queue=request_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id ) response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" diff --git a/pyproject.toml b/pyproject.toml index 38a3c3b..cd71a63 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ opentelemetry-instrumentation-requests = "^0.46b0" opentelemetry-instrumentation-fastapi = "^0.46b0" wcwidth = "<=0.2.12" azure-monitor-opentelemetry = "^1.6.0" +aio-pika = "^9.4.2" [tool.poetry.group.dev.dependencies] pytest = "^7" diff --git a/scripts/send_request.py b/scripts/send_request.py index 76b640d..03d18d3 100644 --- a/scripts/send_request.py +++ b/scripts/send_request.py @@ -1,15 +1,15 @@ +import asyncio import gzip import json import time +from aio_pika.abc import AbstractIncomingMessage from operator import itemgetter - from kn_utils.logging import logger from pyinfra.config.loader import 
load_settings, local_pyinfra_root_path -# from pyinfra.queue.manager import QueueManager -# from pyinfra.queue.sequential_tenants import QueueManager -from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager +from pyinfra.queue.async_tenants import AsyncQueueManager from pyinfra.storage.storages.s3 import get_s3_storage_from_settings +from pyinfra.storage.storages.s3 import S3Storage settings = load_settings(local_pyinfra_root_path / "config/") @@ -43,69 +43,64 @@ def tenant_event_message(tenant_id: str): return {"tenantId": tenant_id} -def send_tenant_event(tenant_id: str, event_type: str): - queue_manager = TenantQueueManager(settings) - queue_manager.purge_queues() +def on_message_callback(storage: S3Storage): + async def on_message(message: AbstractIncomingMessage) -> None: + async with message.process(): + if not message.body: + raise ValueError + response = json.loads(message.body) + logger.info(f"Received {response}") + logger.info(f"Message headers: {message.properties.headers}") + await message.ack() + tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(response) + suffix = response["responseFileExtension"] + result = storage.get_object(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") + result = json.loads(gzip.decompress(result)) + logger.info(f"Contents of result on storage: {result}") + return on_message + + +async def send_tenant_event(queue_manager: AsyncQueueManager, tenant_id: str, event_type: str): + await queue_manager.purge_queues() + message = tenant_event_message(tenant_id) if event_type == "create": - queue_manager.publish_message_to_tenant_created_queue(message=message) + await queue_manager.publish_message_to_tenant_created_queue(message=message) elif event_type == "delete": - queue_manager.publish_message_to_tenant_deleted_queue(message=message) + await queue_manager.publish_message_to_tenant_deleted_queue(message=message) else: logger.warning(f"Event type '{event_type}' not known.") - 
queue_manager.stop_consuming() + await queue_manager.stop_consumers() -def send_service_request(tenant_id: str): - queue_manager = ServiceQueueManager(settings) - queue_name = f"{settings.rabbitmq.service_response_queue_prefix}_{tenant_id}" +async def send_service_request(queue_manager: AsyncQueueManager, tenant_id: str): + request_queue_name = f"{settings.rabbitmq.service_request_queue_prefix}_{tenant_id}" - queue_manager.purge_queues() + await queue_manager.purge_queues() message = upload_json_and_make_message_body(tenant_id) - queue_manager.publish_message_to_input_queue(tenant_id=tenant_id, message=message) - logger.info(f"Put {message} on {queue_name}.") + await queue_manager.publish_message_to_input_queue(tenant_id=tenant_id, message=message) + logger.info(f"Put {message} on {request_queue_name}.") storage = get_s3_storage_from_settings(settings) - for method_frame, properties, body in queue_manager.channel.consume( - queue=queue_name, inactivity_timeout=15 - ): - if not body: - break - response = json.loads(body) - logger.info(f"Received {response}") - logger.info(f"Message headers: {properties.headers}") - queue_manager.channel.basic_ack(method_frame.delivery_tag) - tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(response) - suffix = message["responseFileExtension"] - print(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") - result = storage.get_object(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") - result = json.loads(gzip.decompress(result)) - logger.info(f"Contents of result on storage: {result}") - break - queue_manager.stop_consuming() + response_queue_name = f"{settings.rabbitmq.service_response_queue_prefix}_{tenant_id}" + service_response_queue = await queue_manager.channel.get_queue(name=response_queue_name) + + time.sleep(10) + + callback = on_message_callback(storage) + await service_response_queue.consume(callback=callback) + await queue_manager.stop_consumers() if __name__ == "__main__": - tenant_ids = ["a", "b", 
"c", "d"] - # with ccf.ThreadPoolExecutor() as executor: - # results = executor.map(main, tenant_ids) - # for tenant in tenant_ids: - # main(tenant) + # tenant_ids = ["a", "b", "c", "d"] - send_service_request("redaction") + queue_manager = AsyncQueueManager(settings) - # for tenant in tenant_ids: - # send_tenant_event(tenant_id=tenant, event_type="create") + # asyncio.run(send_tenant_event(queue_manager, "test", "create")) - # # time.sleep(1) - - # for tenant in tenant_ids: - # send_service_request(tenant_id=tenant) + asyncio.run(send_service_request(queue_manager,"redaction")) - # # time.sleep(1) - - # for tenant in tenant_ids: - # send_tenant_event(tenant_id=tenant, event_type="delete") \ No newline at end of file diff --git a/scripts/send_threaded_request.py b/scripts/send_threaded_request.py new file mode 100644 index 0000000..1150690 --- /dev/null +++ b/scripts/send_threaded_request.py @@ -0,0 +1,99 @@ +import gzip +import json +import time +from operator import itemgetter +from threading import Thread +from kn_utils.logging import logger + +from pyinfra.config.loader import load_settings, local_pyinfra_root_path +from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager +from pyinfra.storage.storages.s3 import get_s3_storage_from_settings + +settings = load_settings(local_pyinfra_root_path / "config/") + + +def upload_json_and_make_message_body(tenant_id: str): + dossier_id, file_id, suffix = "dossier", "file", "json.gz" + content = { + "numberOfPages": 7, + "sectionTexts": "data", + } + + object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" + data = gzip.compress(json.dumps(content).encode("utf-8")) + + storage = get_s3_storage_from_settings(settings) + if not storage.has_bucket(): + storage.make_bucket() + storage.put_object(object_name, data) + + message_body = { + "tenantId": tenant_id, + "dossierId": dossier_id, + "fileId": file_id, + "targetFileExtension": suffix, + "responseFileExtension": f"result.{suffix}", + } + 
return message_body + + +def tenant_event_message(tenant_id: str): + return {"tenantId": tenant_id} + + +def send_tenant_event(tenant_id: str, event_type: str): + queue_manager = TenantQueueManager(settings) + queue_manager.purge_queues() + message = tenant_event_message(tenant_id) + if event_type == "create": + queue_manager.publish_message_to_tenant_created_queue(message=message) + elif event_type == "delete": + queue_manager.publish_message_to_tenant_deleted_queue(message=message) + else: + logger.warning(f"Event type '{event_type}' not known.") + queue_manager.stop_consuming() + + +def send_service_request(tenant_id: str): + queue_manager = ServiceQueueManager(settings) + queue_name = f"{settings.rabbitmq.service_response_queue_prefix}_{tenant_id}" + + queue_manager.purge_queues() + + message = upload_json_and_make_message_body(tenant_id) + + queue_manager.publish_message_to_input_queue(tenant_id=tenant_id, message=message) + logger.info(f"Put {message} on {queue_name}.") + + storage = get_s3_storage_from_settings(settings) + + for method_frame, properties, body in queue_manager.channel.consume( + queue=queue_name, inactivity_timeout=15 + ): + if not body: + break + response = json.loads(body) + logger.info(f"Received {response}") + logger.info(f"Message headers: {properties.headers}") + queue_manager.channel.basic_ack(method_frame.delivery_tag) + tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(response) + suffix = message["responseFileExtension"] + print(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") + result = storage.get_object(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") + result = json.loads(gzip.decompress(result)) + logger.info(f"Contents of result on storage: {result}") + break + queue_manager.stop_consuming() + + +if __name__ == "__main__": + tenant_ids = ["a", "b", "c", "d"] + + for tenant in tenant_ids: + send_tenant_event(tenant_id=tenant, event_type="create") + + for tenant in tenant_ids: + 
send_service_request(tenant_id=tenant) + + for tenant in tenant_ids: + send_tenant_event(tenant_id=tenant, event_type="delete") From 6e7c4ccb7b114f5c5821331aa9243ee607e66c34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Wed, 10 Jul 2024 11:45:47 +0200 Subject: [PATCH 09/35] feat: wip for multiple tenants - for pkg build --- pyinfra/examples.py | 24 ++++++++++++------------ pyinfra/queue/threaded_tenants.py | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pyinfra/examples.py b/pyinfra/examples.py index 4440bac..b643e8b 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -37,8 +37,8 @@ def start_standard_queue_consumer( app = app or FastAPI() - # tenant_manager = TenantQueueManager(settings) - # service_manager = ServiceQueueManager(settings) + tenant_manager = TenantQueueManager(settings) + service_manager = ServiceQueueManager(settings) if settings.metrics.prometheus.enabled: logger.info("Prometheus metrics enabled.") @@ -51,21 +51,21 @@ def start_standard_queue_consumer( instrument_pika() instrument_app(app) - manager = AsyncQueueManager(settings=settings, message_processor=callback) + # manager = AsyncQueueManager(settings=settings, message_processor=callback) - # app = add_health_check_endpoint(app, service_manager.is_ready) - app = add_health_check_endpoint(app, manager.is_ready) + app = add_health_check_endpoint(app, service_manager.is_ready) + # app = add_health_check_endpoint(app, manager.is_ready) webserver_thread = create_webserver_thread_from_settings(app, settings) webserver_thread.start() - # thread_t = Thread(target=tenant_manager.start_consuming, daemon=True) - # thread_s = Thread(target=service_manager.start_sequential_basic_get, args=(callback,), daemon=True) + thread_t = Thread(target=tenant_manager.start_consuming, daemon=True) + thread_s = Thread(target=service_manager.start_sequential_basic_get, args=(callback,), daemon=True) - # thread_t.start() - # thread_s.start() + thread_t.start() + 
thread_s.start() - # thread_t.join() - # thread_s.join() + thread_t.join() + thread_s.join() - asyncio.run(manager.start_processing()) \ No newline at end of file + # asyncio.run(manager.start_processing()) \ No newline at end of file diff --git a/pyinfra/queue/threaded_tenants.py b/pyinfra/queue/threaded_tenants.py index cbf1e25..55eea3e 100644 --- a/pyinfra/queue/threaded_tenants.py +++ b/pyinfra/queue/threaded_tenants.py @@ -312,7 +312,7 @@ class ServiceQueueManager(BaseQueueManager): on_message_callback = self._make_on_message_callback(message_processor, tenant_id) on_message_callback(self.channel, method_frame, properties, body) else: - logger.debug(f"No message returned for queue {queue_name}") + # logger.debug(f"No message returned for queue {queue_name}") # time.sleep(self.connection_sleep) time.sleep(0.1) From 7b6408e0dee574f402d639fc6beca8f0e06efb74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Thu, 11 Jul 2024 11:04:02 +0200 Subject: [PATCH 10/35] feat: wip for multiple tenants - for pkg build --- pyinfra/queue/threaded_tenants.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyinfra/queue/threaded_tenants.py b/pyinfra/queue/threaded_tenants.py index 55eea3e..9a00de4 100644 --- a/pyinfra/queue/threaded_tenants.py +++ b/pyinfra/queue/threaded_tenants.py @@ -112,7 +112,7 @@ class TenantQueueManager(BaseQueueManager): self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) def initialize_queues(self) -> None: - self.channel.exchange_declare(exchange=self.tenant_exchange_name, exchange_type="topic") + self.channel.exchange_declare(exchange=self.tenant_exchange_name, exchange_type="topic", durable=True) self.channel.queue_declare( queue=self.tenant_created_queue_name, @@ -312,7 +312,7 @@ class ServiceQueueManager(BaseQueueManager): on_message_callback = self._make_on_message_callback(message_processor, tenant_id) on_message_callback(self.channel, method_frame, properties, body) else: - # 
logger.debug(f"No message returned for queue {queue_name}") + logger.debug(f"No message returned for queue {queue_name}") # time.sleep(self.connection_sleep) time.sleep(0.1) From 9b20a67ace7f7b9dbc85aadc6a4b3d6bba759b4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Thu, 11 Jul 2024 11:41:09 +0200 Subject: [PATCH 11/35] feat: wip for multiple tenants - for pkg build --- pyinfra/queue/threaded_tenants.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyinfra/queue/threaded_tenants.py b/pyinfra/queue/threaded_tenants.py index 9a00de4..a18e66f 100644 --- a/pyinfra/queue/threaded_tenants.py +++ b/pyinfra/queue/threaded_tenants.py @@ -257,8 +257,8 @@ class ServiceQueueManager(BaseQueueManager): self.tenant_ids = self.get_initial_tenant_ids(tenant_endpoint_url=settings.storage.tenant_server.endpoint) def initialize_queues(self) -> None: - self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="direct") - self.channel.exchange_declare(exchange=self.service_response_exchange_name, exchange_type="direct") + self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="direct", durable=True) + self.channel.exchange_declare(exchange=self.service_response_exchange_name, exchange_type="direct", durable=True) for tenant_id in self.tenant_ids: request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" From 2da4f37620ab0287e6d417c63f0cf27634886e79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Thu, 11 Jul 2024 12:49:07 +0200 Subject: [PATCH 12/35] feat: wip for multiple tenants - for pkg build --- pyinfra/queue/threaded_tenants.py | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/pyinfra/queue/threaded_tenants.py b/pyinfra/queue/threaded_tenants.py index a18e66f..219f3d2 100644 --- a/pyinfra/queue/threaded_tenants.py +++ b/pyinfra/queue/threaded_tenants.py @@ -250,7 +250,6 @@ class 
ServiceQueueManager(BaseQueueManager): self.service_response_exchange_name = settings.rabbitmq.service_response_exchange_name self.service_request_queue_prefix = settings.rabbitmq.service_request_queue_prefix - self.service_response_queue_prefix = settings.rabbitmq.service_response_queue_prefix self.service_dlq_name = settings.rabbitmq.service_dlq_name @@ -276,18 +275,6 @@ class ServiceQueueManager(BaseQueueManager): queue=request_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id ) - response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" - self.channel.queue_declare( - queue=response_queue_name, - durable=True, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dlq_name, - "x-expires": self.queue_expiration_time, # TODO: check if necessary - }, - ) - self.channel.queue_bind(queue=response_queue_name, exchange=self.service_response_exchange_name, routing_key=tenant_id) - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=(requests.exceptions.HTTPError, requests.exceptions.ConnectionError)) def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> list: response = requests.get(tenant_endpoint_url, timeout=10) @@ -360,9 +347,7 @@ class ServiceQueueManager(BaseQueueManager): try: for tenant_id in self.tenant_ids: request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" - response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" self.channel.queue_purge(request_queue_name) - self.channel.queue_purge(response_queue_name) logger.info("Queues purged.") except pika.exceptions.ChannelWrongStateError: pass @@ -380,18 +365,6 @@ class ServiceQueueManager(BaseQueueManager): ) self.channel.queue_bind(queue=request_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) - response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" - self.channel.queue_declare( - queue=response_queue_name, - durable=True, - 
arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dlq_name, - "x-expires": self.queue_expiration_time, # TODO: check if necessary - }, - ) - self.channel.queue_bind(queue=response_queue_name, exchange=self.service_response_exchange_name, routing_key=tenant_id) - self.tenant_ids.append(tenant_id) logger.debug(f"Added tenant {tenant_id}.") @@ -400,10 +373,6 @@ class ServiceQueueManager(BaseQueueManager): self.channel.queue_unbind(queue=request_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) self.channel.queue_delete(request_queue_name) - response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" - self.channel.queue_unbind(queue=response_queue_name, exchange=self.service_response_exchange_name, routing_key=tenant_id) - self.channel.queue_delete(response_queue_name) - self.tenant_ids.remove(tenant_id) logger.debug(f"Deleted tenant {tenant_id}.") From aa238948587a34f293c72ea4da54479eb26b5f6f Mon Sep 17 00:00:00 2001 From: "francisco.schulz" Date: Thu, 11 Jul 2024 11:55:17 -0400 Subject: [PATCH 13/35] chose(dependencies): update --- poetry.lock | 767 +++++++++++++++++++++++++++++++------------------ pyproject.toml | 1 + 2 files changed, 495 insertions(+), 273 deletions(-) diff --git a/poetry.lock b/poetry.lock index 5f75804..4c2216e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -15,6 +15,102 @@ files = [ aiormq = ">=6.8.0,<6.9.0" yarl = "*" +[[package]] +name = "aiohttp" +version = "3.9.5" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"}, + {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"}, + {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"}, + {file = 
"aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"}, + {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"}, + {file = 
"aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"}, + {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"}, + {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"}, 
+ {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"}, + {file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"}, + {file = "aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"}, + {file = 
"aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"}, + {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"}, + {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"}, +] + +[package.dependencies] +aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "brotlicffi"] + [[package]] name = "aiormq" version = "6.8.0" @@ -30,6 +126,20 @@ files = [ pamqp = "3.3.0" yarl = "*" +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false 
+python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + [[package]] name = "annotated-types" version = "0.7.0" @@ -199,6 +309,17 @@ six = ">=1.12.0" astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + [[package]] name = "attrs" version = "23.2.0" @@ -375,13 +496,13 @@ files = [ [[package]] name = "certifi" -version = "2024.6.2" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.6.2-py3-none-any.whl", hash = "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"}, - {file = "certifi-2024.6.2.tar.gz", hash = "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516"}, + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, ] [[package]] @@ -750,13 +871,13 @@ tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""} [[package]] name = "cyclonedx-python-lib" -version = "7.4.1" +version = "7.5.1" description = "Python library for CycloneDX" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "cyclonedx_python_lib-7.4.1-py3-none-any.whl", hash = "sha256:73bf8d5c09ad10698c75d3ce3f123c84c9aff3959d67b8b5ca9e5a7c5da43abe"}, - {file = "cyclonedx_python_lib-7.4.1.tar.gz", hash = "sha256:23bf8196e008bb8e06c1040ad2ab69492891d8a581cb2aefa36a77f199790a37"}, + {file = "cyclonedx_python_lib-7.5.1-py3-none-any.whl", hash = "sha256:9fc2c2e5facfd9530ede1f4525c903d29d91945688c5689b6d5fab46381dcab9"}, + {file = "cyclonedx_python_lib-7.5.1.tar.gz", hash = "sha256:00cfe1e58452698650ae08b8f4389f7b1ec203a3e1c50cbf6ca6d320941dfb3f"}, ] [package.dependencies] @@ -764,7 +885,7 @@ jsonschema = {version = ">=4.18,<5.0", extras = ["format"], optional = true, mar license-expression = ">=30,<31" lxml = {version = ">=4,<6", optional = true, markers = "extra == \"validation\" or extra == \"xml-validation\""} packageurl-python = ">=0.11,<2" -py-serializable = ">=1.0.3,<2" +py-serializable = ">=1.1.0,<2.0.0" sortedcontainers = ">=2.4.0,<3.0.0" [package.extras] @@ -975,6 +1096,92 @@ files = [ {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, ] +[[package]] +name = "frozenlist" 
+version = "1.4.1" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file 
= "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = 
"frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = 
"frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = 
"frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = 
"sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + [[package]] name = "funcy" version = "2.0" @@ -1005,61 +1212,61 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "grpcio" -version = "1.64.1" +version = "1.65.0" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" files = [ - {file = "grpcio-1.64.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:55697ecec192bc3f2f3cc13a295ab670f51de29884ca9ae6cd6247df55df2502"}, - {file = "grpcio-1.64.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3b64ae304c175671efdaa7ec9ae2cc36996b681eb63ca39c464958396697daff"}, - {file = "grpcio-1.64.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:bac71b4b28bc9af61efcdc7630b166440bbfbaa80940c9a697271b5e1dabbc61"}, - {file = "grpcio-1.64.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c024ffc22d6dc59000faf8ad781696d81e8e38f4078cb0f2630b4a3cf231a90"}, - {file = "grpcio-1.64.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7cd5c1325f6808b8ae31657d281aadb2a51ac11ab081ae335f4f7fc44c1721d"}, - {file = "grpcio-1.64.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0a2813093ddb27418a4c99f9b1c223fab0b053157176a64cc9db0f4557b69bd9"}, - {file = "grpcio-1.64.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2981c7365a9353f9b5c864595c510c983251b1ab403e05b1ccc70a3d9541a73b"}, - {file = "grpcio-1.64.1-cp310-cp310-win32.whl", hash = "sha256:1262402af5a511c245c3ae918167eca57342c72320dffae5d9b51840c4b2f86d"}, - {file = "grpcio-1.64.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:19264fc964576ddb065368cae953f8d0514ecc6cb3da8903766d9fb9d4554c33"}, - {file = "grpcio-1.64.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:58b1041e7c870bb30ee41d3090cbd6f0851f30ae4eb68228955d973d3efa2e61"}, - {file = "grpcio-1.64.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bbc5b1d78a7822b0a84c6f8917faa986c1a744e65d762ef6d8be9d75677af2ca"}, - {file = "grpcio-1.64.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:5841dd1f284bd1b3d8a6eca3a7f062b06f1eec09b184397e1d1d43447e89a7ae"}, - {file = "grpcio-1.64.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8caee47e970b92b3dd948371230fcceb80d3f2277b3bf7fbd7c0564e7d39068e"}, - {file = "grpcio-1.64.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73819689c169417a4f978e562d24f2def2be75739c4bed1992435d007819da1b"}, - {file = "grpcio-1.64.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6503b64c8b2dfad299749cad1b595c650c91e5b2c8a1b775380fcf8d2cbba1e9"}, - {file = "grpcio-1.64.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1de403fc1305fd96cfa75e83be3dee8538f2413a6b1685b8452301c7ba33c294"}, - {file = "grpcio-1.64.1-cp311-cp311-win32.whl", hash = "sha256:d4d29cc612e1332237877dfa7fe687157973aab1d63bd0f84cf06692f04c0367"}, - {file = "grpcio-1.64.1-cp311-cp311-win_amd64.whl", hash = "sha256:5e56462b05a6f860b72f0fa50dca06d5b26543a4e88d0396259a07dc30f4e5aa"}, - {file = "grpcio-1.64.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:4657d24c8063e6095f850b68f2d1ba3b39f2b287a38242dcabc166453e950c59"}, - {file = "grpcio-1.64.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:62b4e6eb7bf901719fce0ca83e3ed474ae5022bb3827b0a501e056458c51c0a1"}, - {file = "grpcio-1.64.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:ee73a2f5ca4ba44fa33b4d7d2c71e2c8a9e9f78d53f6507ad68e7d2ad5f64a22"}, - {file = "grpcio-1.64.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:198908f9b22e2672a998870355e226a725aeab327ac4e6ff3a1399792ece4762"}, - {file = "grpcio-1.64.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b9d0acaa8d835a6566c640f48b50054f422d03e77e49716d4c4e8e279665a1"}, - {file = "grpcio-1.64.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5e42634a989c3aa6049f132266faf6b949ec2a6f7d302dbb5c15395b77d757eb"}, - {file = "grpcio-1.64.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1a82e0b9b3022799c336e1fc0f6210adc019ae84efb7321d668129d28ee1efb"}, - {file = "grpcio-1.64.1-cp312-cp312-win32.whl", hash = "sha256:55260032b95c49bee69a423c2f5365baa9369d2f7d233e933564d8a47b893027"}, - {file = "grpcio-1.64.1-cp312-cp312-win_amd64.whl", hash = "sha256:c1a786ac592b47573a5bb7e35665c08064a5d77ab88a076eec11f8ae86b3e3f6"}, - {file = "grpcio-1.64.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:a011ac6c03cfe162ff2b727bcb530567826cec85eb8d4ad2bfb4bd023287a52d"}, - {file = "grpcio-1.64.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4d6dab6124225496010bd22690f2d9bd35c7cbb267b3f14e7a3eb05c911325d4"}, - {file = "grpcio-1.64.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:a5e771d0252e871ce194d0fdcafd13971f1aae0ddacc5f25615030d5df55c3a2"}, - {file = "grpcio-1.64.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c3c1b90ab93fed424e454e93c0ed0b9d552bdf1b0929712b094f5ecfe7a23ad"}, - {file = "grpcio-1.64.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20405cb8b13fd779135df23fabadc53b86522d0f1cba8cca0e87968587f50650"}, - {file = "grpcio-1.64.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0cc79c982ccb2feec8aad0e8fb0d168bcbca85bc77b080d0d3c5f2f15c24ea8f"}, - {file = "grpcio-1.64.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a3a035c37ce7565b8f4f35ff683a4db34d24e53dc487e47438e434eb3f701b2a"}, - {file = "grpcio-1.64.1-cp38-cp38-win32.whl", hash = "sha256:1257b76748612aca0f89beec7fa0615727fd6f2a1ad580a9638816a4b2eb18fd"}, - {file = 
"grpcio-1.64.1-cp38-cp38-win_amd64.whl", hash = "sha256:0a12ddb1678ebc6a84ec6b0487feac020ee2b1659cbe69b80f06dbffdb249122"}, - {file = "grpcio-1.64.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:75dbbf415026d2862192fe1b28d71f209e2fd87079d98470db90bebe57b33179"}, - {file = "grpcio-1.64.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e3d9f8d1221baa0ced7ec7322a981e28deb23749c76eeeb3d33e18b72935ab62"}, - {file = "grpcio-1.64.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5f8b75f64d5d324c565b263c67dbe4f0af595635bbdd93bb1a88189fc62ed2e5"}, - {file = "grpcio-1.64.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c84ad903d0d94311a2b7eea608da163dace97c5fe9412ea311e72c3684925602"}, - {file = "grpcio-1.64.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:940e3ec884520155f68a3b712d045e077d61c520a195d1a5932c531f11883489"}, - {file = "grpcio-1.64.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f10193c69fc9d3d726e83bbf0f3d316f1847c3071c8c93d8090cf5f326b14309"}, - {file = "grpcio-1.64.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ac15b6c2c80a4d1338b04d42a02d376a53395ddf0ec9ab157cbaf44191f3ffdd"}, - {file = "grpcio-1.64.1-cp39-cp39-win32.whl", hash = "sha256:03b43d0ccf99c557ec671c7dede64f023c7da9bb632ac65dbc57f166e4970040"}, - {file = "grpcio-1.64.1-cp39-cp39-win_amd64.whl", hash = "sha256:ed6091fa0adcc7e4ff944090cf203a52da35c37a130efa564ded02b7aff63bcd"}, - {file = "grpcio-1.64.1.tar.gz", hash = "sha256:8d51dd1c59d5fa0f34266b80a3805ec29a1f26425c2a54736133f6d87fc4968a"}, + {file = "grpcio-1.65.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:66ea0ca6108fcb391444bb7b37d04eac85bfaea1cfaf16db675d3734fc74ca1b"}, + {file = "grpcio-1.65.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:45d371dc4436fdcc31677f75b3ebe6175fbf0712ced49e0e4dfc18bbaf50f5a7"}, + {file = "grpcio-1.65.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:02dbbe113ec48581da07b7ddf52bfd49f5772374c4b5e36ea25131ce00b4f4f3"}, + {file 
= "grpcio-1.65.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5c9ee7b8f1ac82cc24f223cd7ec803c17079f90e63022d3e66c5e53fff0afb99"}, + {file = "grpcio-1.65.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da927f8a44e42837ae0027a3a063c85e2b26491d2babd4554e116f66fd46045d"}, + {file = "grpcio-1.65.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9916ea670a589f95f2453a4a5040294ace096271c126e684a1e45e61af76c988"}, + {file = "grpcio-1.65.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c46114787c5f530e845d2781f914600aade04b4f132dd012efb31bc4f76a72bb"}, + {file = "grpcio-1.65.0-cp310-cp310-win32.whl", hash = "sha256:1362d94ac9c05b202736180d23296840e00f495859b206261e6ed03a6d41978b"}, + {file = "grpcio-1.65.0-cp310-cp310-win_amd64.whl", hash = "sha256:00ed0828980009ce852d98230cdd2d5a22a4bcb946b5a0f6334dfd8258374cd7"}, + {file = "grpcio-1.65.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:25303f3747522252dd9cfcbacb88d828a36040f513e28fba17ee6184ebc3d330"}, + {file = "grpcio-1.65.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2a2b368717dd8e0f6cb7e412d3b3bfb0012f61c04b2f76dbed669b0f5cf3fb0c"}, + {file = "grpcio-1.65.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:93c41fb74c576dc0130b190a5775197282115c6abbe1d913d42d9a2f9d98fdae"}, + {file = "grpcio-1.65.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34eb4fb9ef4d11ea741d264916d1b31a9e169d539a6f1c8300e04c493eec747e"}, + {file = "grpcio-1.65.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55c41272f9d7d3503e3e3e93f3f98589f07075eebd24e1c291a1df2e8ef40a49"}, + {file = "grpcio-1.65.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c275bac926754022c89ef03f16470f65b811e2cc25f2167d365564ad43e31001"}, + {file = "grpcio-1.65.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b02db2a59071f4d05cfc4d0c972759778d27e1d3347f22ca178b91117ad10541"}, + {file = "grpcio-1.65.0-cp311-cp311-win32.whl", 
hash = "sha256:ec9f41b9b0eb6407a6edb21bc22cb32e03cae76cde9c1d8bb151ed77c2c5af94"}, + {file = "grpcio-1.65.0-cp311-cp311-win_amd64.whl", hash = "sha256:3efc8b0600870f5e518dd2738188b3ba7b1bb2668244c9a2a8c4debda4ffe62b"}, + {file = "grpcio-1.65.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:d787abafafa9ed71e17220d4178c883abdb380e0484bd8965cb2e06375c7495b"}, + {file = "grpcio-1.65.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:52347f21d6ec77d7e7e4d5037f5e8ac0a0c851856d9459f9f95b009c2c740b4a"}, + {file = "grpcio-1.65.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:b16e1cd9b9cb9ac942cb20b7a2b1c5d35b9e61017e2998bf242a6f7748071795"}, + {file = "grpcio-1.65.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89bc9c8c6743a48f115fea8f3fada76be269d1914bf636e5fdb7cec9cdf192bc"}, + {file = "grpcio-1.65.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5a2ae900e6423438c4a9a5be38e9228621340a18333371215c0419d24a254ef"}, + {file = "grpcio-1.65.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4f451091ddd28f00c655f0b1e208cca705d40e4fde56a3cf849fead61a700d10"}, + {file = "grpcio-1.65.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4e30cd885e02abb98d6b0d5beb6259a567b0ce1416c498ec815fe383adb77864"}, + {file = "grpcio-1.65.0-cp312-cp312-win32.whl", hash = "sha256:9a9a0ce10a07923ebd48c056060052ebddfbec3193cdd32207af358ef317b00a"}, + {file = "grpcio-1.65.0-cp312-cp312-win_amd64.whl", hash = "sha256:87d9350ffe1a84b7441db7c70fdb4e51269a379f7a95d696d0d133831c4f9a19"}, + {file = "grpcio-1.65.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:0c504b30fc2fba143d9254e0240243b5866df9b7523162448797f4b21b5f30d5"}, + {file = "grpcio-1.65.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:480be4d41ceb5a7f22ecfc8db1ab68aeb58cc1a2da0865a91917d3cd0438dac7"}, + {file = "grpcio-1.65.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:984a1627b50d5df4a24120302ca95adb5139ba1c40354ba258fc2913666d8ee7"}, + {file = 
"grpcio-1.65.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f242956c0f4985dfcc920cd251cd7a899ca168e157e98c9b74a688657e813ad6"}, + {file = "grpcio-1.65.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ea93f570b2341c69635b8a333afb99fb4d5584f26a9cc94f06e56c943648aab"}, + {file = "grpcio-1.65.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1bebefd76517a43d0e77a5dcd61a8b69e9775340d856a0b35c6368ae628f7714"}, + {file = "grpcio-1.65.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:356d10a491a92a08c21aef806379f7b020f591c23580e3d29aeeb59d45908c86"}, + {file = "grpcio-1.65.0-cp38-cp38-win32.whl", hash = "sha256:c3294fd3ef9faa1fe14ad15d72dd7d2ee9fee6d3bd29a08c53e59a3c94de9cc9"}, + {file = "grpcio-1.65.0-cp38-cp38-win_amd64.whl", hash = "sha256:a2defc49c984550f25034e88d17a7e69dba6deb2b981d8f56f19b3aaa788ff30"}, + {file = "grpcio-1.65.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:b73022222ed4bf718d3d8527a9b88b162074a62c7530d30f4e951b56304b0f19"}, + {file = "grpcio-1.65.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16e0f789158ecc8309e0a2f16cb8c5e4753f351a7673aab75f42783c83f1e38b"}, + {file = "grpcio-1.65.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:cb0bd8bfba21fe0318317bf11687c67a3f8ce726369c0b3ccf4e6607fc5bc5f2"}, + {file = "grpcio-1.65.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1096f0fa79ec601aefd71685d3a610cdde96274c38cd8adcef972660297669a"}, + {file = "grpcio-1.65.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e576a88ce82fea70e68c548aceb5cd560c27da50091581996858bbbe01230c83"}, + {file = "grpcio-1.65.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ab70bd1ccb05ef373b691a9b9985289d8b2cf63c704471f5ee132e228d351af5"}, + {file = "grpcio-1.65.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:03eab632a8ce8dba00d97482d2821bf752a7c3cb4dc051be6c587ad3ca1c3e6d"}, + {file = "grpcio-1.65.0-cp39-cp39-win32.whl", hash = 
"sha256:f19bb85795ca82e007be427e7b6ac5e730023ffbab69d39ddeb1b84c6339df16"}, + {file = "grpcio-1.65.0-cp39-cp39-win_amd64.whl", hash = "sha256:dbd7eeafa67d8e403ac61caa31ebda2861435dcfd7bb7953c4ef05ad2ecf74bf"}, + {file = "grpcio-1.65.0.tar.gz", hash = "sha256:2c7891f66daefc80cce1bed6bc0c2802d26dac46544ba1be79c4e7d85661dd73"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.64.1)"] +protobuf = ["grpcio-tools (>=1.65.0)"] [[package]] name = "h11" @@ -1074,13 +1281,13 @@ files = [ [[package]] name = "identify" -version = "2.5.36" +version = "2.6.0" description = "File identification library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "identify-2.5.36-py2.py3-none-any.whl", hash = "sha256:37d93f380f4de590500d9dba7db359d0d3da95ffe7f9de1753faa159e71e7dfa"}, - {file = "identify-2.5.36.tar.gz", hash = "sha256:e5e00f54165f9047fbebeb4a560f9acfb8af4c88232be60a488e9b68d122745d"}, + {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, + {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, ] [package.extras] @@ -1129,13 +1336,13 @@ files = [ [[package]] name = "ipykernel" -version = "6.29.4" +version = "6.29.5" description = "IPython Kernel for Jupyter" optional = false python-versions = ">=3.8" files = [ - {file = "ipykernel-6.29.4-py3-none-any.whl", hash = "sha256:1181e653d95c6808039c509ef8e67c4126b3b3af7781496c7cbfb5ed938a27da"}, - {file = "ipykernel-6.29.4.tar.gz", hash = "sha256:3d44070060f9475ac2092b760123fadf105d2e2493c24848b6691a7c4f42af5c"}, + {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"}, + {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"}, ] [package.dependencies] @@ -1162,13 +1369,13 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", 
"pytest-asyncio [[package]] name = "ipython" -version = "8.25.0" +version = "8.26.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.10" files = [ - {file = "ipython-8.25.0-py3-none-any.whl", hash = "sha256:53eee7ad44df903a06655871cbab66d156a051fd86f3ec6750470ac9604ac1ab"}, - {file = "ipython-8.25.0.tar.gz", hash = "sha256:c6ed726a140b6e725b911528f80439c534fac915246af3efc39440a6b0f9d716"}, + {file = "ipython-8.26.0-py3-none-any.whl", hash = "sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff"}, + {file = "ipython-8.26.0.tar.gz", hash = "sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c"}, ] [package.dependencies] @@ -1195,7 +1402,7 @@ nbformat = ["nbformat"] notebook = ["ipywidgets", "notebook"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] -test = ["pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] +test = ["packaging", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] [[package]] @@ -1272,13 +1479,13 @@ files = [ [[package]] name = "jsonschema" -version = "4.22.0" +version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"}, - {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"}, + {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, + {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, ] [package.dependencies] @@ -1297,7 +1504,7 @@ webcolors = {version = ">=1.11", optional = true, markers = "extra == \"format\" 
[package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] -format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] [[package]] name = "jsonschema-specifications" @@ -1497,9 +1704,13 @@ files = [ {file = "lxml-5.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196"}, {file = "lxml-5.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b"}, {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8"}, + {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6"}, {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716"}, + {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c"}, {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905"}, + {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184"}, {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6"}, + {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = 
"sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f"}, {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61"}, {file = "lxml-5.2.2-cp37-cp37m-win32.whl", hash = "sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f"}, {file = "lxml-5.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40"}, @@ -2171,13 +2382,13 @@ files = [ [[package]] name = "packageurl-python" -version = "0.15.1" +version = "0.15.3" description = "A purl aka. Package URL parser and builder" optional = false python-versions = ">=3.7" files = [ - {file = "packageurl_python-0.15.1-py3-none-any.whl", hash = "sha256:f7a44ddb9caaf6197b3b62b890ed0be5cb15e962accab2a51db36846d5174562"}, - {file = "packageurl_python-0.15.1.tar.gz", hash = "sha256:9a37b9a7cad9a2872b4612151ba3749fd9dec90485577c14d374b6e66b7edf03"}, + {file = "packageurl_python-0.15.3-py3-none-any.whl", hash = "sha256:96624702032239e70e61b950e14460a5b5f87ac21fc68f119414047b94f0de27"}, + {file = "packageurl_python-0.15.3.tar.gz", hash = "sha256:82e1150f1fc228e25e7b3be1c641ef96b6a0811526c0b4e4f7882a181e862607"}, ] [package.extras] @@ -2450,13 +2661,13 @@ files = [ [[package]] name = "py-serializable" -version = "1.0.3" +version = "1.1.0" description = "Library for serializing and deserializing Python Objects to and from JSON and XML." 
optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "py_serializable-1.0.3-py3-none-any.whl", hash = "sha256:afba815f465b9fe7ab1c1a56d1aa8880c8a9e67a6e28b7ed62d4696fa369caf8"}, - {file = "py_serializable-1.0.3.tar.gz", hash = "sha256:da3cb4b1f3cc5cc5ebecdd3dadbabd5f65d764357366fa64ee9cbaf0d4b70dcf"}, + {file = "py_serializable-1.1.0-py3-none-any.whl", hash = "sha256:ae7ae4326b0d037b7e710f6e8bb1a97ece4ac2895a1f443a17ffd17f85547d76"}, + {file = "py_serializable-1.1.0.tar.gz", hash = "sha256:3311ab39063b131caca0fb75e2038153682e55576c67f24a2de72d402dccb6e0"}, ] [package.dependencies] @@ -2516,109 +2727,119 @@ files = [ [[package]] name = "pydantic" -version = "2.7.4" +version = "2.8.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, - {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.4" -typing-extensions = ">=4.6.1" +pydantic-core = "2.20.1" +typing-extensions = {version = ">=4.6.1", markers = "python_version < \"3.13\""} [package.extras] email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.4" +version = "2.20.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"}, - {file = 
"pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"}, - {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"}, - {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"}, - {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"}, - {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"}, - {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"}, - {file = 
"pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"}, - {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"}, - {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"}, - {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"}, - {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"}, - {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"}, - {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = 
"sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"}, - {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"}, - {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"}, - 
{file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"}, - {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"}, - {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"}, - {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"}, - {file = 
"pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"}, - {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = 
"pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = 
"pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = 
"pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = 
"pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = 
"pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash 
= "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + 
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, ] [package.dependencies] @@ -2640,13 +2861,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pylint" -version = "3.2.4" +version = "3.2.5" description = "python code static checker" optional = false python-versions = ">=3.8.0" files = [ - {file = "pylint-3.2.4-py3-none-any.whl", hash = "sha256:43b8ffdf1578e4e4439fa1f6ace402281f5dd61999192280fa12fe411bef2999"}, - {file = "pylint-3.2.4.tar.gz", hash = "sha256:5753d27e49a658b12a48c2883452751a2ecfc7f38594e0980beb03a6e77e6f86"}, + {file = "pylint-3.2.5-py3-none-any.whl", hash = 
"sha256:32cd6c042b5004b8e857d727708720c54a676d1e22917cf1a2df9b4d4868abd6"}, + {file = "pylint-3.2.5.tar.gz", hash = "sha256:e9b7171e242dcc6ebd0aaa7540481d1a72860748a0a7816b8fe6cf6c80a6fe7e"}, ] [package.dependencies] @@ -3006,126 +3227,126 @@ files = [ [[package]] name = "rpds-py" -version = "0.18.1" +version = "0.19.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" files = [ - {file = "rpds_py-0.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d31dea506d718693b6b2cffc0648a8929bdc51c70a311b2770f09611caa10d53"}, - {file = "rpds_py-0.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:732672fbc449bab754e0b15356c077cc31566df874964d4801ab14f71951ea80"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a98a1f0552b5f227a3d6422dbd61bc6f30db170939bd87ed14f3c339aa6c7c9"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f1944ce16401aad1e3f7d312247b3d5de7981f634dc9dfe90da72b87d37887d"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38e14fb4e370885c4ecd734f093a2225ee52dc384b86fa55fe3f74638b2cfb09"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08d74b184f9ab6289b87b19fe6a6d1a97fbfea84b8a3e745e87a5de3029bf944"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d70129cef4a8d979caa37e7fe957202e7eee8ea02c5e16455bc9808a59c6b2f0"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0bb20e3a11bd04461324a6a798af34d503f8d6f1aa3d2aa8901ceaf039176d"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81c5196a790032e0fc2464c0b4ab95f8610f96f1f2fa3d4deacce6a79852da60"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:f3027be483868c99b4985fda802a57a67fdf30c5d9a50338d9db646d590198da"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d44607f98caa2961bab4fa3c4309724b185b464cdc3ba6f3d7340bac3ec97cc1"}, - {file = "rpds_py-0.18.1-cp310-none-win32.whl", hash = "sha256:c273e795e7a0f1fddd46e1e3cb8be15634c29ae8ff31c196debb620e1edb9333"}, - {file = "rpds_py-0.18.1-cp310-none-win_amd64.whl", hash = "sha256:8352f48d511de5f973e4f2f9412736d7dea76c69faa6d36bcf885b50c758ab9a"}, - {file = "rpds_py-0.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6b5ff7e1d63a8281654b5e2896d7f08799378e594f09cf3674e832ecaf396ce8"}, - {file = "rpds_py-0.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8927638a4d4137a289e41d0fd631551e89fa346d6dbcfc31ad627557d03ceb6d"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:154bf5c93d79558b44e5b50cc354aa0459e518e83677791e6adb0b039b7aa6a7"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07f2139741e5deb2c5154a7b9629bc5aa48c766b643c1a6750d16f865a82c5fc"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c7672e9fba7425f79019db9945b16e308ed8bc89348c23d955c8c0540da0a07"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:489bdfe1abd0406eba6b3bb4fdc87c7fa40f1031de073d0cfb744634cc8fa261"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c20f05e8e3d4fc76875fc9cb8cf24b90a63f5a1b4c5b9273f0e8225e169b100"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:967342e045564cef76dfcf1edb700b1e20838d83b1aa02ab313e6a497cf923b8"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2cc7c1a47f3a63282ab0f422d90ddac4aa3034e39fc66a559ab93041e6505da7"}, - {file = 
"rpds_py-0.18.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f7afbfee1157e0f9376c00bb232e80a60e59ed716e3211a80cb8506550671e6e"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e6934d70dc50f9f8ea47081ceafdec09245fd9f6032669c3b45705dea096b88"}, - {file = "rpds_py-0.18.1-cp311-none-win32.whl", hash = "sha256:c69882964516dc143083d3795cb508e806b09fc3800fd0d4cddc1df6c36e76bb"}, - {file = "rpds_py-0.18.1-cp311-none-win_amd64.whl", hash = "sha256:70a838f7754483bcdc830444952fd89645569e7452e3226de4a613a4c1793fb2"}, - {file = "rpds_py-0.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3dd3cd86e1db5aadd334e011eba4e29d37a104b403e8ca24dcd6703c68ca55b3"}, - {file = "rpds_py-0.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05f3d615099bd9b13ecf2fc9cf2d839ad3f20239c678f461c753e93755d629ee"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35b2b771b13eee8729a5049c976197ff58a27a3829c018a04341bcf1ae409b2b"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ee17cd26b97d537af8f33635ef38be873073d516fd425e80559f4585a7b90c43"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b646bf655b135ccf4522ed43d6902af37d3f5dbcf0da66c769a2b3938b9d8184"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19ba472b9606c36716062c023afa2484d1e4220548751bda14f725a7de17b4f6"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e30ac5e329098903262dc5bdd7e2086e0256aa762cc8b744f9e7bf2a427d3f8"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d58ad6317d188c43750cb76e9deacf6051d0f884d87dc6518e0280438648a9ac"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:e1735502458621921cee039c47318cb90b51d532c2766593be6207eec53e5c4c"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f5bab211605d91db0e2995a17b5c6ee5edec1270e46223e513eaa20da20076ac"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2fc24a329a717f9e2448f8cd1f960f9dac4e45b6224d60734edeb67499bab03a"}, - {file = "rpds_py-0.18.1-cp312-none-win32.whl", hash = "sha256:1805d5901779662d599d0e2e4159d8a82c0b05faa86ef9222bf974572286b2b6"}, - {file = "rpds_py-0.18.1-cp312-none-win_amd64.whl", hash = "sha256:720edcb916df872d80f80a1cc5ea9058300b97721efda8651efcd938a9c70a72"}, - {file = "rpds_py-0.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c827576e2fa017a081346dce87d532a5310241648eb3700af9a571a6e9fc7e74"}, - {file = "rpds_py-0.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aa3679e751408d75a0b4d8d26d6647b6d9326f5e35c00a7ccd82b78ef64f65f8"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0abeee75434e2ee2d142d650d1e54ac1f8b01e6e6abdde8ffd6eeac6e9c38e20"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed402d6153c5d519a0faf1bb69898e97fb31613b49da27a84a13935ea9164dfc"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:338dee44b0cef8b70fd2ef54b4e09bb1b97fc6c3a58fea5db6cc083fd9fc2724"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7750569d9526199c5b97e5a9f8d96a13300950d910cf04a861d96f4273d5b104"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607345bd5912aacc0c5a63d45a1f73fef29e697884f7e861094e443187c02be5"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:207c82978115baa1fd8d706d720b4a4d2b0913df1c78c85ba73fe6c5804505f0"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:6d1e42d2735d437e7e80bab4d78eb2e459af48c0a46e686ea35f690b93db792d"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5463c47c08630007dc0fe99fb480ea4f34a89712410592380425a9b4e1611d8e"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:06d218939e1bf2ca50e6b0ec700ffe755e5216a8230ab3e87c059ebb4ea06afc"}, - {file = "rpds_py-0.18.1-cp38-none-win32.whl", hash = "sha256:312fe69b4fe1ffbe76520a7676b1e5ac06ddf7826d764cc10265c3b53f96dbe9"}, - {file = "rpds_py-0.18.1-cp38-none-win_amd64.whl", hash = "sha256:9437ca26784120a279f3137ee080b0e717012c42921eb07861b412340f85bae2"}, - {file = "rpds_py-0.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:19e515b78c3fc1039dd7da0a33c28c3154458f947f4dc198d3c72db2b6b5dc93"}, - {file = "rpds_py-0.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7b28c5b066bca9a4eb4e2f2663012debe680f097979d880657f00e1c30875a0"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:673fdbbf668dd958eff750e500495ef3f611e2ecc209464f661bc82e9838991e"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d960de62227635d2e61068f42a6cb6aae91a7fe00fca0e3aeed17667c8a34611"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352a88dc7892f1da66b6027af06a2e7e5d53fe05924cc2cfc56495b586a10b72"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e0ee01ad8260184db21468a6e1c37afa0529acc12c3a697ee498d3c2c4dcaf3"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c39ad2f512b4041343ea3c7894339e4ca7839ac38ca83d68a832fc8b3748ab"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aaa71ee43a703c321906813bb252f69524f02aa05bf4eec85f0c41d5d62d0f4c"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:6cd8098517c64a85e790657e7b1e509b9fe07487fd358e19431cb120f7d96338"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4adec039b8e2928983f885c53b7cc4cda8965b62b6596501a0308d2703f8af1b"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32b7daaa3e9389db3695964ce8e566e3413b0c43e3394c05e4b243a4cd7bef26"}, - {file = "rpds_py-0.18.1-cp39-none-win32.whl", hash = "sha256:2625f03b105328729f9450c8badda34d5243231eef6535f80064d57035738360"}, - {file = "rpds_py-0.18.1-cp39-none-win_amd64.whl", hash = "sha256:bf18932d0003c8c4d51a39f244231986ab23ee057d235a12b2684ea26a353590"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cbfbea39ba64f5e53ae2915de36f130588bba71245b418060ec3330ebf85678e"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a3d456ff2a6a4d2adcdf3c1c960a36f4fd2fec6e3b4902a42a384d17cf4e7a65"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7700936ef9d006b7ef605dc53aa364da2de5a3aa65516a1f3ce73bf82ecfc7ae"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51584acc5916212e1bf45edd17f3a6b05fe0cbb40482d25e619f824dccb679de"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:942695a206a58d2575033ff1e42b12b2aece98d6003c6bc739fbf33d1773b12f"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b906b5f58892813e5ba5c6056d6a5ad08f358ba49f046d910ad992196ea61397"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f8e3fecca256fefc91bb6765a693d96692459d7d4c644660a9fff32e517843"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7732770412bab81c5a9f6d20aeb60ae943a9b36dcd990d876a773526468e7163"}, - 
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:bd1105b50ede37461c1d51b9698c4f4be6e13e69a908ab7751e3807985fc0346"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:618916f5535784960f3ecf8111581f4ad31d347c3de66d02e728de460a46303c"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:17c6d2155e2423f7e79e3bb18151c686d40db42d8645e7977442170c360194d4"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c4c4c3f878df21faf5fac86eda32671c27889e13570645a9eea0a1abdd50922"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:fab6ce90574645a0d6c58890e9bcaac8d94dff54fb51c69e5522a7358b80ab64"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:531796fb842b53f2695e94dc338929e9f9dbf473b64710c28af5a160b2a8927d"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:740884bc62a5e2bbb31e584f5d23b32320fd75d79f916f15a788d527a5e83644"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:998125738de0158f088aef3cb264a34251908dd2e5d9966774fdab7402edfab7"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2be6e9dd4111d5b31ba3b74d17da54a8319d8168890fbaea4b9e5c3de630ae5"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cee71bc618cd93716f3c1bf56653740d2d13ddbd47673efa8bf41435a60daa"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c3caec4ec5cd1d18e5dd6ae5194d24ed12785212a90b37f5f7f06b8bedd7139"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:27bba383e8c5231cd559affe169ca0b96ec78d39909ffd817f28b166d7ddd4d8"}, - {file = 
"rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:a888e8bdb45916234b99da2d859566f1e8a1d2275a801bb8e4a9644e3c7e7909"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6031b25fb1b06327b43d841f33842b383beba399884f8228a6bb3df3088485ff"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48c2faaa8adfacefcbfdb5f2e2e7bdad081e5ace8d182e5f4ade971f128e6bb3"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d85164315bd68c0806768dc6bb0429c6f95c354f87485ee3593c4f6b14def2bd"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afd80f6c79893cfc0574956f78a0add8c76e3696f2d6a15bca2c66c415cf2d4"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa242ac1ff583e4ec7771141606aafc92b361cd90a05c30d93e343a0c2d82a89"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21be4770ff4e08698e1e8e0bce06edb6ea0626e7c8f560bc08222880aca6a6f"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c45a639e93a0c5d4b788b2613bd637468edd62f8f95ebc6fcc303d58ab3f0a8"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910e71711d1055b2768181efa0a17537b2622afeb0424116619817007f8a2b10"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9bb1f182a97880f6078283b3505a707057c42bf55d8fca604f70dedfdc0772a"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d54f74f40b1f7aaa595a02ff42ef38ca654b1469bef7d52867da474243cc633"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:8d2e182c9ee01135e11e9676e9a62dfad791a7a467738f06726872374a83db49"}, - {file = 
"rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:636a15acc588f70fda1661234761f9ed9ad79ebed3f2125d44be0862708b666e"}, - {file = "rpds_py-0.18.1.tar.gz", hash = "sha256:dc48b479d540770c811fbd1eb9ba2bb66951863e448efec2e2c102625328e92f"}, + {file = "rpds_py-0.19.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:fb37bd599f031f1a6fb9e58ec62864ccf3ad549cf14bac527dbfa97123edcca4"}, + {file = "rpds_py-0.19.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3384d278df99ec2c6acf701d067147320b864ef6727405d6470838476e44d9e8"}, + {file = "rpds_py-0.19.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e54548e0be3ac117595408fd4ca0ac9278fde89829b0b518be92863b17ff67a2"}, + {file = "rpds_py-0.19.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8eb488ef928cdbc05a27245e52de73c0d7c72a34240ef4d9893fdf65a8c1a955"}, + {file = "rpds_py-0.19.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5da93debdfe27b2bfc69eefb592e1831d957b9535e0943a0ee8b97996de21b5"}, + {file = "rpds_py-0.19.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79e205c70afddd41f6ee79a8656aec738492a550247a7af697d5bd1aee14f766"}, + {file = "rpds_py-0.19.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:959179efb3e4a27610e8d54d667c02a9feaa86bbabaf63efa7faa4dfa780d4f1"}, + {file = "rpds_py-0.19.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a6e605bb9edcf010f54f8b6a590dd23a4b40a8cb141255eec2a03db249bc915b"}, + {file = "rpds_py-0.19.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9133d75dc119a61d1a0ded38fb9ba40a00ef41697cc07adb6ae098c875195a3f"}, + {file = "rpds_py-0.19.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dd36b712d35e757e28bf2f40a71e8f8a2d43c8b026d881aa0c617b450d6865c9"}, + {file = "rpds_py-0.19.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:354f3a91718489912f2e0fc331c24eaaf6a4565c080e00fbedb6015857c00582"}, + {file = "rpds_py-0.19.0-cp310-none-win32.whl", hash = "sha256:ebcbf356bf5c51afc3290e491d3722b26aaf5b6af3c1c7f6a1b757828a46e336"}, + {file = "rpds_py-0.19.0-cp310-none-win_amd64.whl", hash = "sha256:75a6076289b2df6c8ecb9d13ff79ae0cad1d5fb40af377a5021016d58cd691ec"}, + {file = "rpds_py-0.19.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6d45080095e585f8c5097897313def60caa2046da202cdb17a01f147fb263b81"}, + {file = "rpds_py-0.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5c9581019c96f865483d031691a5ff1cc455feb4d84fc6920a5ffc48a794d8a"}, + {file = "rpds_py-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1540d807364c84516417115c38f0119dfec5ea5c0dd9a25332dea60b1d26fc4d"}, + {file = "rpds_py-0.19.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9e65489222b410f79711dc3d2d5003d2757e30874096b2008d50329ea4d0f88c"}, + {file = "rpds_py-0.19.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9da6f400eeb8c36f72ef6646ea530d6d175a4f77ff2ed8dfd6352842274c1d8b"}, + {file = "rpds_py-0.19.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37f46bb11858717e0efa7893c0f7055c43b44c103e40e69442db5061cb26ed34"}, + {file = "rpds_py-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:071d4adc734de562bd11d43bd134330fb6249769b2f66b9310dab7460f4bf714"}, + {file = "rpds_py-0.19.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9625367c8955e4319049113ea4f8fee0c6c1145192d57946c6ffcd8fe8bf48dd"}, + {file = "rpds_py-0.19.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e19509145275d46bc4d1e16af0b57a12d227c8253655a46bbd5ec317e941279d"}, + {file = "rpds_py-0.19.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d438e4c020d8c39961deaf58f6913b1bf8832d9b6f62ec35bd93e97807e9cbc"}, + {file = 
"rpds_py-0.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:90bf55d9d139e5d127193170f38c584ed3c79e16638890d2e36f23aa1630b952"}, + {file = "rpds_py-0.19.0-cp311-none-win32.whl", hash = "sha256:8d6ad132b1bc13d05ffe5b85e7a01a3998bf3a6302ba594b28d61b8c2cf13aaf"}, + {file = "rpds_py-0.19.0-cp311-none-win_amd64.whl", hash = "sha256:7ec72df7354e6b7f6eb2a17fa6901350018c3a9ad78e48d7b2b54d0412539a67"}, + {file = "rpds_py-0.19.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:5095a7c838a8647c32aa37c3a460d2c48debff7fc26e1136aee60100a8cd8f68"}, + {file = "rpds_py-0.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f2f78ef14077e08856e788fa482107aa602636c16c25bdf59c22ea525a785e9"}, + {file = "rpds_py-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7cc6cb44f8636fbf4a934ca72f3e786ba3c9f9ba4f4d74611e7da80684e48d2"}, + {file = "rpds_py-0.19.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf902878b4af334a09de7a45badbff0389e7cf8dc2e4dcf5f07125d0b7c2656d"}, + {file = "rpds_py-0.19.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:688aa6b8aa724db1596514751ffb767766e02e5c4a87486ab36b8e1ebc1aedac"}, + {file = "rpds_py-0.19.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57dbc9167d48e355e2569346b5aa4077f29bf86389c924df25c0a8b9124461fb"}, + {file = "rpds_py-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b4cf5a9497874822341c2ebe0d5850fed392034caadc0bad134ab6822c0925b"}, + {file = "rpds_py-0.19.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8a790d235b9d39c70a466200d506bb33a98e2ee374a9b4eec7a8ac64c2c261fa"}, + {file = "rpds_py-0.19.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1d16089dfa58719c98a1c06f2daceba6d8e3fb9b5d7931af4a990a3c486241cb"}, + {file = "rpds_py-0.19.0-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:bc9128e74fe94650367fe23f37074f121b9f796cabbd2f928f13e9661837296d"}, + {file = "rpds_py-0.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c8f77e661ffd96ff104bebf7d0f3255b02aa5d5b28326f5408d6284c4a8b3248"}, + {file = "rpds_py-0.19.0-cp312-none-win32.whl", hash = "sha256:5f83689a38e76969327e9b682be5521d87a0c9e5a2e187d2bc6be4765f0d4600"}, + {file = "rpds_py-0.19.0-cp312-none-win_amd64.whl", hash = "sha256:06925c50f86da0596b9c3c64c3837b2481337b83ef3519e5db2701df695453a4"}, + {file = "rpds_py-0.19.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:52e466bea6f8f3a44b1234570244b1cff45150f59a4acae3fcc5fd700c2993ca"}, + {file = "rpds_py-0.19.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e21cc693045fda7f745c790cb687958161ce172ffe3c5719ca1764e752237d16"}, + {file = "rpds_py-0.19.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b31f059878eb1f5da8b2fd82480cc18bed8dcd7fb8fe68370e2e6285fa86da6"}, + {file = "rpds_py-0.19.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1dd46f309e953927dd018567d6a9e2fb84783963650171f6c5fe7e5c41fd5666"}, + {file = "rpds_py-0.19.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34a01a4490e170376cd79258b7f755fa13b1a6c3667e872c8e35051ae857a92b"}, + {file = "rpds_py-0.19.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bcf426a8c38eb57f7bf28932e68425ba86def6e756a5b8cb4731d8e62e4e0223"}, + {file = "rpds_py-0.19.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f68eea5df6347d3f1378ce992d86b2af16ad7ff4dcb4a19ccdc23dea901b87fb"}, + {file = "rpds_py-0.19.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dab8d921b55a28287733263c0e4c7db11b3ee22aee158a4de09f13c93283c62d"}, + {file = "rpds_py-0.19.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6fe87efd7f47266dfc42fe76dae89060038f1d9cb911f89ae7e5084148d1cc08"}, + {file = "rpds_py-0.19.0-cp38-cp38-musllinux_1_2_i686.whl", hash = 
"sha256:535d4b52524a961d220875688159277f0e9eeeda0ac45e766092bfb54437543f"}, + {file = "rpds_py-0.19.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8b1a94b8afc154fbe36978a511a1f155f9bd97664e4f1f7a374d72e180ceb0ae"}, + {file = "rpds_py-0.19.0-cp38-none-win32.whl", hash = "sha256:7c98298a15d6b90c8f6e3caa6457f4f022423caa5fa1a1ca7a5e9e512bdb77a4"}, + {file = "rpds_py-0.19.0-cp38-none-win_amd64.whl", hash = "sha256:b0da31853ab6e58a11db3205729133ce0df26e6804e93079dee095be3d681dc1"}, + {file = "rpds_py-0.19.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5039e3cef7b3e7a060de468a4a60a60a1f31786da94c6cb054e7a3c75906111c"}, + {file = "rpds_py-0.19.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab1932ca6cb8c7499a4d87cb21ccc0d3326f172cfb6a64021a889b591bb3045c"}, + {file = "rpds_py-0.19.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2afd2164a1e85226fcb6a1da77a5c8896c18bfe08e82e8ceced5181c42d2179"}, + {file = "rpds_py-0.19.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1c30841f5040de47a0046c243fc1b44ddc87d1b12435a43b8edff7e7cb1e0d0"}, + {file = "rpds_py-0.19.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f757f359f30ec7dcebca662a6bd46d1098f8b9fb1fcd661a9e13f2e8ce343ba1"}, + {file = "rpds_py-0.19.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15e65395a59d2e0e96caf8ee5389ffb4604e980479c32742936ddd7ade914b22"}, + {file = "rpds_py-0.19.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb0f6eb3a320f24b94d177e62f4074ff438f2ad9d27e75a46221904ef21a7b05"}, + {file = "rpds_py-0.19.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b228e693a2559888790936e20f5f88b6e9f8162c681830eda303bad7517b4d5a"}, + {file = "rpds_py-0.19.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2575efaa5d949c9f4e2cdbe7d805d02122c16065bfb8d95c129372d65a291a0b"}, + {file = "rpds_py-0.19.0-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:5c872814b77a4e84afa293a1bee08c14daed1068b2bb1cc312edbf020bbbca2b"}, + {file = "rpds_py-0.19.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:850720e1b383df199b8433a20e02b25b72f0fded28bc03c5bd79e2ce7ef050be"}, + {file = "rpds_py-0.19.0-cp39-none-win32.whl", hash = "sha256:ce84a7efa5af9f54c0aa7692c45861c1667080814286cacb9958c07fc50294fb"}, + {file = "rpds_py-0.19.0-cp39-none-win_amd64.whl", hash = "sha256:1c26da90b8d06227d7769f34915913911222d24ce08c0ab2d60b354e2d9c7aff"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:75969cf900d7be665ccb1622a9aba225cf386bbc9c3bcfeeab9f62b5048f4a07"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8445f23f13339da640d1be8e44e5baf4af97e396882ebbf1692aecd67f67c479"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5a7c1062ef8aea3eda149f08120f10795835fc1c8bc6ad948fb9652a113ca55"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:462b0c18fbb48fdbf980914a02ee38c423a25fcc4cf40f66bacc95a2d2d73bc8"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3208f9aea18991ac7f2b39721e947bbd752a1abbe79ad90d9b6a84a74d44409b"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3444fe52b82f122d8a99bf66777aed6b858d392b12f4c317da19f8234db4533"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb4bac7185a9f0168d38c01d7a00addece9822a52870eee26b8d5b61409213"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6b130bd4163c93798a6b9bb96be64a7c43e1cec81126ffa7ffaa106e1fc5cef5"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:a707b158b4410aefb6b054715545bbb21aaa5d5d0080217290131c49c2124a6e"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dc9ac4659456bde7c567107556ab065801622396b435a3ff213daef27b495388"}, + {file = "rpds_py-0.19.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:81ea573aa46d3b6b3d890cd3c0ad82105985e6058a4baed03cf92518081eec8c"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f148c3f47f7f29a79c38cc5d020edcb5ca780020fab94dbc21f9af95c463581"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0906357f90784a66e89ae3eadc2654f36c580a7d65cf63e6a616e4aec3a81be"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f629ecc2db6a4736b5ba95a8347b0089240d69ad14ac364f557d52ad68cf94b0"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6feacd1d178c30e5bc37184526e56740342fd2aa6371a28367bad7908d454fc"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae8b6068ee374fdfab63689be0963333aa83b0815ead5d8648389a8ded593378"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78d57546bad81e0da13263e4c9ce30e96dcbe720dbff5ada08d2600a3502e526"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8b6683a37338818646af718c9ca2a07f89787551057fae57c4ec0446dc6224b"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e8481b946792415adc07410420d6fc65a352b45d347b78fec45d8f8f0d7496f0"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:bec35eb20792ea64c3c57891bc3ca0bedb2884fbac2c8249d9b731447ecde4fa"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = 
"sha256:aa5476c3e3a402c37779e95f7b4048db2cb5b0ed0b9d006983965e93f40fe05a"}, + {file = "rpds_py-0.19.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:19d02c45f2507b489fd4df7b827940f1420480b3e2e471e952af4d44a1ea8e34"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a3e2fd14c5d49ee1da322672375963f19f32b3d5953f0615b175ff7b9d38daed"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:93a91c2640645303e874eada51f4f33351b84b351a689d470f8108d0e0694210"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5b9fc03bf76a94065299d4a2ecd8dfbae4ae8e2e8098bbfa6ab6413ca267709"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5a4b07cdf3f84310c08c1de2c12ddadbb7a77568bcb16e95489f9c81074322ed"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba0ed0dc6763d8bd6e5de5cf0d746d28e706a10b615ea382ac0ab17bb7388633"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:474bc83233abdcf2124ed3f66230a1c8435896046caa4b0b5ab6013c640803cc"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:329c719d31362355a96b435f4653e3b4b061fcc9eba9f91dd40804ca637d914e"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef9101f3f7b59043a34f1dccbb385ca760467590951952d6701df0da9893ca0c"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:0121803b0f424ee2109d6e1f27db45b166ebaa4b32ff47d6aa225642636cd834"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:8344127403dea42f5970adccf6c5957a71a47f522171fafaf4c6ddb41b61703a"}, + {file = "rpds_py-0.19.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:443cec402ddd650bb2b885113e1dcedb22b1175c6be223b14246a714b61cd521"}, + {file = "rpds_py-0.19.0.tar.gz", hash = "sha256:4fdc9afadbeb393b4bbbad75481e0ea78e4469f2e1d713a90811700830b553a9"}, ] [[package]] name = "setuptools" -version = "70.1.1" +version = "70.3.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-70.1.1-py3-none-any.whl", hash = "sha256:a58a8fde0541dab0419750bcc521fbdf8585f6e5cb41909df3a472ef7b81ca95"}, - {file = "setuptools-70.1.1.tar.gz", hash = "sha256:937a48c7cdb7a21eb53cd7f9b59e525503aa8abaf3584c730dc5f7a5bec3a650"}, + {file = "setuptools-70.3.0-py3-none-any.whl", hash = "sha256:fe384da74336c398e0d956d1cae0669bc02eed936cdb1d49b57de1990dc11ffc"}, + {file = "setuptools-70.3.0.tar.gz", hash = "sha256:f171bab1dfbc86b132997f26a119f6056a57950d058587841a0082e8830f9dc5"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx 
(>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "six" @@ -3209,13 +3430,13 @@ files = [ [[package]] name = "tomlkit" -version = "0.12.5" +version = "0.13.0" description = "Style preserving TOML library" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomlkit-0.12.5-py3-none-any.whl", hash = "sha256:af914f5a9c59ed9d0762c7b64d3b5d5df007448eb9cd2edc8a46b1eafead172f"}, - {file = "tomlkit-0.12.5.tar.gz", hash = "sha256:eef34fba39834d4d6b73c9ba7f3e4d1c417a4e56f89a7e96e090dd0d24b8fb3c"}, + {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, + {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, ] [[package]] @@ -3585,4 +3806,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.11" -content-hash = "f5092a8dc5540c2085559368c90f5a69efe0b1eba468f5545f29194a305b004d" +content-hash = "5d7e7dddc7b3aca84f5263def609a2dee5c7155b940d54cd4a158bb72b2bf496" diff --git a/pyproject.toml b/pyproject.toml index cd71a63..adbd8da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ opentelemetry-instrumentation-fastapi = 
"^0.46b0" wcwidth = "<=0.2.12" azure-monitor-opentelemetry = "^1.6.0" aio-pika = "^9.4.2" +aiohttp = "^3.9.5" [tool.poetry.group.dev.dependencies] pytest = "^7" From abde776cd15d2bd6a1fa243fdd295d39cac354fa Mon Sep 17 00:00:00 2001 From: "francisco.schulz" Date: Thu, 11 Jul 2024 11:55:52 -0400 Subject: [PATCH 14/35] feat(RabbitMQHandler): add async test class --- scripts/test_async_class.py | 166 ++++++++++++++++++++++++++++++++++++ 1 file changed, 166 insertions(+) create mode 100644 scripts/test_async_class.py diff --git a/scripts/test_async_class.py b/scripts/test_async_class.py new file mode 100644 index 0000000..f9eee3e --- /dev/null +++ b/scripts/test_async_class.py @@ -0,0 +1,166 @@ +import asyncio +import aiohttp +from aio_pika import connect_robust, ExchangeType, Message +from aio_pika.abc import AbstractIncomingMessage +import json +from logging import getLogger +from pyinfra.config.loader import load_settings, local_pyinfra_root_path +import requests + +logger = getLogger(__name__) +logger.setLevel("DEBUG") + +settings = load_settings(local_pyinfra_root_path / "config/") + + +class RabbitMQHandler: + def __init__(self, connection_params, tenant_service_url): + self.connection_params = connection_params + self.tenant_service_url = tenant_service_url + # TODO: remove hardcoded values + self.input_queue_prefix = "service_request_queue" + self.tenant_exchange_name = "tenants-exchange" + self.service_request_exchange_name = "service_request_exchange" # INPUT + self.service_response_exchange_name = "service_response_exchange" # OUTPUT + self.service_dead_letter_queue_name = "service_dlq" + self.connection = None + self.channel = None + self.tenant_exchange = None + self.input_exchange = None + self.output_exchange = None + self.tenant_queues = {} + + async def connect(self): + self.connection = await connect_robust(**self.connection_params) + self.channel = await self.connection.channel() + + # Declare exchanges + self.tenant_exchange = await 
self.channel.declare_exchange( + self.tenant_exchange_name, ExchangeType.TOPIC, durable=True + ) + self.input_exchange = await self.channel.declare_exchange( + self.service_request_exchange_name, ExchangeType.DIRECT, durable=True + ) + self.output_exchange = await self.channel.declare_exchange( + self.service_response_exchange_name, ExchangeType.DIRECT, durable=True + ) + + async def setup_tenant_queue(self): + queue = await self.channel.declare_queue( + "tenant_queue", + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dead_letter_queue_name, + }, + ) + await queue.bind(self.tenant_exchange, routing_key="tenant.*") + await queue.consume(self.process_tenant_message) + + async def process_tenant_message(self, message: AbstractIncomingMessage): + async with message.process(): + message_body = json.loads(message.body.decode()) + print(message_body) + logger.debug(f"input message: {message_body}") + tenant_id = message_body["queue_name"] + routing_key = message.routing_key + + if routing_key == "tenant.create": + await self.create_tenant_queues(tenant_id) + elif routing_key == "tenant.delete": + await self.delete_tenant_queues(tenant_id) + + async def create_tenant_queues(self, tenant_id): + # Create and bind input queue + queue_name = f"{self.input_queue_prefix}_{tenant_id}" + print(f"queue declared: {queue_name}") + input_queue = await self.channel.declare_queue( + queue_name, + durable=True, + arguments={ + "x-dead-letter-exchange": "", + "x-dead-letter-routing-key": self.service_dead_letter_queue_name, + "x-expires": self.queue_expiration_time, + "x-max-priority": 2, + }, + ) + await input_queue.bind(self.input_exchange, routing_key=tenant_id) + await input_queue.consume(self.process_input_message) + + # Store queues for later use + self.tenant_queues[tenant_id] = input_queue + print(f"Created queues for tenant {tenant_id}") + + async def delete_tenant_queues(self, tenant_id): + if tenant_id in 
self.tenant_queues: + input_queue = self.tenant_queues[tenant_id] + await input_queue.delete() + del self.tenant_queues[tenant_id] + print(f"Deleted queues for tenant {tenant_id}") + + async def process_input_message(self, message: AbstractIncomingMessage): + async with message.process(): + message_body = json.loads(message.body.decode()) + logger.debug(f"input message: {message_body}") + # Process the incoming message + processed_content = f"Processed: {message_body}" + + # TODO: add additional processing logic here + # ... + + # Publish to the output queue + tenant_id = message.routing_key + await self.output_exchange.publish(Message(body=processed_content.encode()), routing_key=tenant_id) + + # FIXME: coroutine error + async def fetch_active_tenants(self): + async with aiohttp.ClientSession() as session: + async with session.get(self.tenant_service_url) as response: + if response.status == 200 and response.headers["content-type"].lower() == "application/json": + tenants = {await tenant["tenantId"] for tenant in response.json()} + return await tenants + else: + print(f"Failed to fetch active tenants. Status: {response.status}") + return set() + + # TODO: remove after fetch_active_tenants is fixed + def get_initial_tenant_ids(self) -> set: + response = requests.get(self.tenant_service_url, timeout=10) + response.raise_for_status() # Raise an HTTPError for bad responses + + if response.headers["content-type"].lower() == "application/json": + tenants = {tenant["tenantId"] for tenant in response.json()} + return tenants + return set() + + async def initialize_tenant_queues(self): + active_tenants = self.get_initial_tenant_ids() + for tenant_id in active_tenants: + await self.create_tenant_queues(tenant_id) + + async def run(self): + await self.connect() + await self.initialize_tenant_queues() + await self.setup_tenant_queue() + print("RabbitMQ handler is running. 
Press CTRL+C to exit.") + try: + await asyncio.Future() # Run forever + finally: + await self.connection.close() + + +async def main(): + connection_params = { + "host": settings.rabbitmq.host, + "port": settings.rabbitmq.port, + "login": settings.rabbitmq.username, + "password": settings.rabbitmq.password, + "client_properties": {"heartbeat": settings.rabbitmq.heartbeat}, + } + tenant_service_url = "http://localhost:8080/internal/tenants" + handler = RabbitMQHandler(connection_params, tenant_service_url) + await handler.run() + + +if __name__ == "__main__": + asyncio.run(main()) From f723bcb9b1a0e0decb3954d95c2feb7fb540ed84 Mon Sep 17 00:00:00 2001 From: "francisco.schulz" Date: Thu, 11 Jul 2024 12:06:59 -0400 Subject: [PATCH 15/35] fix(fetch_active_tenants): propper async API call --- scripts/test_async_class.py | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/scripts/test_async_class.py b/scripts/test_async_class.py index f9eee3e..3ac82e7 100644 --- a/scripts/test_async_class.py +++ b/scripts/test_async_class.py @@ -1,4 +1,5 @@ import asyncio +from typing import Set import aiohttp from aio_pika import connect_robust, ExchangeType, Message from aio_pika.abc import AbstractIncomingMessage @@ -23,6 +24,7 @@ class RabbitMQHandler: self.service_request_exchange_name = "service_request_exchange" # INPUT self.service_response_exchange_name = "service_response_exchange" # OUTPUT self.service_dead_letter_queue_name = "service_dlq" + self.queue_expiration_time = 300000 self.connection = None self.channel = None self.tenant_exchange = None @@ -112,29 +114,22 @@ class RabbitMQHandler: tenant_id = message.routing_key await self.output_exchange.publish(Message(body=processed_content.encode()), routing_key=tenant_id) - # FIXME: coroutine error - async def fetch_active_tenants(self): - async with aiohttp.ClientSession() as session: - async with session.get(self.tenant_service_url) as response: - if response.status == 200 
and response.headers["content-type"].lower() == "application/json": - tenants = {await tenant["tenantId"] for tenant in response.json()} - return await tenants - else: - print(f"Failed to fetch active tenants. Status: {response.status}") - return set() - - # TODO: remove after fetch_active_tenants is fixed - def get_initial_tenant_ids(self) -> set: - response = requests.get(self.tenant_service_url, timeout=10) - response.raise_for_status() # Raise an HTTPError for bad responses - - if response.headers["content-type"].lower() == "application/json": - tenants = {tenant["tenantId"] for tenant in response.json()} - return tenants - return set() + async def fetch_active_tenants(self) -> Set[str]: + try: + async with aiohttp.ClientSession() as session: + async with session.get(self.tenant_service_url) as response: + if response.status == 200: + data = await response.json() + return {tenant["tenantId"] for tenant in data} + else: + logger.error(f"Failed to fetch active tenants. Status: {response.status}") + return set() + except aiohttp.ClientError as e: + logger.error(f"Error fetching active tenants: {e}") + return set() async def initialize_tenant_queues(self): - active_tenants = self.get_initial_tenant_ids() + active_tenants = await self.fetch_active_tenants() for tenant_id in active_tenants: await self.create_tenant_queues(tenant_id) From cc25a20c2412339834efbb3cb1f8457a625aec38 Mon Sep 17 00:00:00 2001 From: "francisco.schulz" Date: Thu, 11 Jul 2024 12:21:48 -0400 Subject: [PATCH 16/35] feat(process_input_message): add message processing logic with support to pass in external message processor --- scripts/test_async_class.py | 56 +++++++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/scripts/test_async_class.py b/scripts/test_async_class.py index 3ac82e7..95af16b 100644 --- a/scripts/test_async_class.py +++ b/scripts/test_async_class.py @@ -1,7 +1,7 @@ import asyncio -from typing import Set +from typing import Any, 
Callable, Dict, Set import aiohttp -from aio_pika import connect_robust, ExchangeType, Message +from aio_pika import connect_robust, ExchangeType, Message, IncomingMessage from aio_pika.abc import AbstractIncomingMessage import json from logging import getLogger @@ -15,7 +15,7 @@ settings = load_settings(local_pyinfra_root_path / "config/") class RabbitMQHandler: - def __init__(self, connection_params, tenant_service_url): + def __init__(self, connection_params, tenant_service_url, message_processor): self.connection_params = connection_params self.tenant_service_url = tenant_service_url # TODO: remove hardcoded values @@ -25,6 +25,7 @@ class RabbitMQHandler: self.service_response_exchange_name = "service_response_exchange" # OUTPUT self.service_dead_letter_queue_name = "service_dlq" self.queue_expiration_time = 300000 + self.message_processor = message_processor self.connection = None self.channel = None self.tenant_exchange = None @@ -87,7 +88,7 @@ class RabbitMQHandler: }, ) await input_queue.bind(self.input_exchange, routing_key=tenant_id) - await input_queue.consume(self.process_input_message) + await input_queue.consume(lambda msg: self.process_input_message(msg, self.message_processor)) # Store queues for later use self.tenant_queues[tenant_id] = input_queue @@ -100,19 +101,46 @@ class RabbitMQHandler: del self.tenant_queues[tenant_id] print(f"Deleted queues for tenant {tenant_id}") - async def process_input_message(self, message: AbstractIncomingMessage): + async def process_input_message( + self, message: IncomingMessage, message_processor: Callable[[Dict[str, Any]], Dict[str, Any]] + ) -> None: async with message.process(): - message_body = json.loads(message.body.decode()) - logger.debug(f"input message: {message_body}") - # Process the incoming message - processed_content = f"Processed: {message_body}" + try: + tenant_id = message.routing_key + message_body = json.loads(message.body.decode("utf-8")) - # TODO: add additional processing logic here - # ... 
+ # Extract headers + filtered_message_headers = ( + {k: v for k, v in message.headers.items() if k.lower().startswith("x-")} if message.headers else {} + ) - # Publish to the output queue - tenant_id = message.routing_key - await self.output_exchange.publish(Message(body=processed_content.encode()), routing_key=tenant_id) + logger.debug(f"Processing message with {filtered_message_headers=}.") + + # Process the message + message_body.update(filtered_message_headers) + + # Run the message processor in a separate thread to avoid blocking + loop = asyncio.get_running_loop() + result = await loop.run_in_executor(None, message_processor, message_body) + + if result: + # Publish the result to the output exchange + await self.output_exchange.publish( + Message(body=json.dumps(result).encode(), headers=filtered_message_headers), + routing_key=tenant_id, + ) + logger.info(f"Published result to queue {tenant_id}.") + + except json.JSONDecodeError: + logger.error(f"Invalid JSON in input message: {message.body}") + # Message will be nacked and sent to dead-letter queue + except FileNotFoundError as e: + logger.warning(f"{e}, declining message.") + # Message will be nacked and sent to dead-letter queue + except Exception as e: + logger.error(f"Error processing input message: {e}", exc_info=True) + # Message will be nacked and sent to dead-letter queue + raise async def fetch_active_tenants(self) -> Set[str]: try: From 5ff65f2cf40195d16e51c001559ce45d91c0c9cb Mon Sep 17 00:00:00 2001 From: "francisco.schulz" Date: Thu, 11 Jul 2024 14:46:41 -0400 Subject: [PATCH 17/35] feat(tests): add RabbitMQHandler class tests --- scripts/test_async_class.py | 128 +++++++++++++++++++++++++++++++++--- 1 file changed, 119 insertions(+), 9 deletions(-) diff --git a/scripts/test_async_class.py b/scripts/test_async_class.py index 95af16b..8a66d8f 100644 --- a/scripts/test_async_class.py +++ b/scripts/test_async_class.py @@ -1,4 +1,6 @@ import asyncio +import gzip +from operator import itemgetter 
from typing import Any, Callable, Dict, Set import aiohttp from aio_pika import connect_robust, ExchangeType, Message, IncomingMessage @@ -6,7 +8,7 @@ from aio_pika.abc import AbstractIncomingMessage import json from logging import getLogger from pyinfra.config.loader import load_settings, local_pyinfra_root_path -import requests +from pyinfra.storage.storages.s3 import get_s3_storage_from_settings logger = getLogger(__name__) logger.setLevel("DEBUG") @@ -65,7 +67,7 @@ class RabbitMQHandler: message_body = json.loads(message.body.decode()) print(message_body) logger.debug(f"input message: {message_body}") - tenant_id = message_body["queue_name"] + tenant_id = message_body["tenantId"] routing_key = message.routing_key if routing_key == "tenant.create": @@ -125,11 +127,7 @@ class RabbitMQHandler: if result: # Publish the result to the output exchange - await self.output_exchange.publish( - Message(body=json.dumps(result).encode(), headers=filtered_message_headers), - routing_key=tenant_id, - ) - logger.info(f"Published result to queue {tenant_id}.") + self.publish_to_output_exchange(tenant_id, result, filtered_message_headers) except json.JSONDecodeError: logger.error(f"Invalid JSON in input message: {message.body}") @@ -142,6 +140,13 @@ class RabbitMQHandler: # Message will be nacked and sent to dead-letter queue raise + async def publish_to_output_exchange(self, tenant_id: str, result: Dict[str, Any], headers: Dict[str, Any]): + await self.output_exchange.publish( + Message(body=json.dumps(result).encode(), headers=headers), + routing_key=tenant_id, + ) + logger.info(f"Published result to queue {tenant_id}.") + async def fetch_active_tenants(self) -> Set[str]: try: async with aiohttp.ClientSession() as session: @@ -181,9 +186,114 @@ async def main(): "client_properties": {"heartbeat": settings.rabbitmq.heartbeat}, } tenant_service_url = "http://localhost:8080/internal/tenants" - handler = RabbitMQHandler(connection_params, tenant_service_url) + handler = 
RabbitMQHandler(connection_params, tenant_service_url, dummy_message_processor) await handler.run() +######################################################################## +def upload_json_and_make_message_body(tenant_id: str) -> Dict[str, Any]: + dossier_id, file_id, suffix = "dossier", "file", "json.gz" + content = { + "numberOfPages": 7, + "sectionTexts": "data", + } + + object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" + data = gzip.compress(json.dumps(content).encode("utf-8")) + + storage = get_s3_storage_from_settings(settings) + if not storage.has_bucket(): + storage.make_bucket() + storage.put_object(object_name, data) + + message_body = { + "tenantId": tenant_id, + "dossierId": dossier_id, + "fileId": file_id, + "targetFileExtension": suffix, + "responseFileExtension": f"result.{suffix}", + } + return message_body + + +def tenant_event_message(tenant_id: str) -> Dict[str, str]: + return {"tenantId": tenant_id} + + +async def dummy_message_processor(message: Dict[str, Any]) -> Dict[str, Any]: + logger.info(f"Processing message: {message}") + await asyncio.sleep(1) # Simulate processing time + + storage = get_s3_storage_from_settings(settings) + tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(message) + suffix = message["responseFileExtension"] + + # Simulate processing by modifying the original content + object_name = f"{tenant_id}/{dossier_id}/{file_id}.{message['targetFileExtension']}" + original_content = json.loads(gzip.decompress(storage.get_object(object_name))) + processed_content = { + "processedPages": original_content["numberOfPages"], + "processedSectionTexts": f"Processed: {original_content['sectionTexts']}", + } + + # Save processed content + processed_object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" + processed_data = gzip.compress(json.dumps(processed_content).encode("utf-8")) + storage.put_object(processed_object_name, processed_data) + + processed_message = message.copy() + 
processed_message["processed"] = True + processed_message["processor_message"] = "This message was processed by the dummy processor" + + logger.info(f"Finished processing message. Result: {processed_message}") + return processed_message + + +async def test_rabbitmq_handler(): + connection_params = { + "host": settings.rabbitmq.host, + "port": settings.rabbitmq.port, + "login": settings.rabbitmq.username, + "password": settings.rabbitmq.password, + "client_properties": {"heartbeat": settings.rabbitmq.heartbeat}, + } + tenant_service_url = "http://localhost:8080/internal/tenants" + handler = RabbitMQHandler(connection_params, tenant_service_url, dummy_message_processor) + + await handler.connect() + await handler.setup_tenant_queue() + + # Test tenant creation + tenant_id = "test_tenant" + create_message = tenant_event_message(tenant_id) + await handler.tenant_exchange.publish( + Message(body=json.dumps(create_message).encode()), routing_key="tenant.create" + ) + logger.info(f"Sent create tenant message for {tenant_id}") + + # Wait for tenant queue creation + await asyncio.sleep(2) + + # Test service request + service_request = upload_json_and_make_message_body(tenant_id) + await handler.input_exchange.publish(Message(body=json.dumps(service_request).encode()), routing_key=tenant_id) + logger.info(f"Sent service request for {tenant_id}") + + # Wait for message processing + await asyncio.sleep(5) + + # Test tenant deletion + delete_message = tenant_event_message(tenant_id) + await handler.tenant_exchange.publish( + Message(body=json.dumps(delete_message).encode()), routing_key="tenant.delete" + ) + logger.info(f"Sent delete tenant message for {tenant_id}") + + # Wait for tenant queue deletion + await asyncio.sleep(2) + + await handler.connection.close() + + if __name__ == "__main__": - asyncio.run(main()) + asyncio.run(test_rabbitmq_handler()) From 75591188227c40120fb32b22192a866261e09460 Mon Sep 17 00:00:00 2001 From: "francisco.schulz" Date: Thu, 11 Jul 2024 
14:50:11 -0400 Subject: [PATCH 18/35] fix: remove sleep commands --- scripts/test_async_class.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/scripts/test_async_class.py b/scripts/test_async_class.py index 8a66d8f..d5cd616 100644 --- a/scripts/test_async_class.py +++ b/scripts/test_async_class.py @@ -261,6 +261,7 @@ async def test_rabbitmq_handler(): handler = RabbitMQHandler(connection_params, tenant_service_url, dummy_message_processor) await handler.connect() + await handler.initialize_tenant_queues() await handler.setup_tenant_queue() # Test tenant creation @@ -271,17 +272,11 @@ async def test_rabbitmq_handler(): ) logger.info(f"Sent create tenant message for {tenant_id}") - # Wait for tenant queue creation - await asyncio.sleep(2) - # Test service request service_request = upload_json_and_make_message_body(tenant_id) await handler.input_exchange.publish(Message(body=json.dumps(service_request).encode()), routing_key=tenant_id) logger.info(f"Sent service request for {tenant_id}") - # Wait for message processing - await asyncio.sleep(5) - # Test tenant deletion delete_message = tenant_event_message(tenant_id) await handler.tenant_exchange.publish( @@ -289,9 +284,6 @@ async def test_rabbitmq_handler(): ) logger.info(f"Sent delete tenant message for {tenant_id}") - # Wait for tenant queue deletion - await asyncio.sleep(2) - await handler.connection.close() From f9aec74d55e6006e3c2bd11d38e490d5102ceff5 Mon Sep 17 00:00:00 2001 From: "francisco.schulz" Date: Thu, 11 Jul 2024 15:54:21 -0400 Subject: [PATCH 19/35] chore: clean up + improve robustness --- scripts/test_async_class.py | 235 +++++++++++++++++------------------- 1 file changed, 113 insertions(+), 122 deletions(-) diff --git a/scripts/test_async_class.py b/scripts/test_async_class.py index d5cd616..d3fe7d9 100644 --- a/scripts/test_async_class.py +++ b/scripts/test_async_class.py @@ -4,69 +4,92 @@ from operator import itemgetter from typing import Any, Callable, Dict, Set 
import aiohttp from aio_pika import connect_robust, ExchangeType, Message, IncomingMessage -from aio_pika.abc import AbstractIncomingMessage +from aio_pika.abc import AbstractIncomingMessage, AbstractChannel, AbstractConnection, AbstractExchange import json -from logging import getLogger +from kn_utils.logging import logger from pyinfra.config.loader import load_settings, local_pyinfra_root_path from pyinfra.storage.storages.s3 import get_s3_storage_from_settings - -logger = getLogger(__name__) -logger.setLevel("DEBUG") +from dataclasses import dataclass, field settings = load_settings(local_pyinfra_root_path / "config/") +@dataclass +class RabbitMQConfig: + host: str + port: int + username: str + password: str + heartbeat: int + input_queue_prefix: str + tenant_exchange_name: str + service_request_exchange_name: str + service_response_exchange_name: str + service_dead_letter_queue_name: str + queue_expiration_time: int + + connection_params: Dict[str, object] = field(init=False) + + def __post_init__(self): + self.connection_params = { + "host": self.host, + "port": self.port, + "login": self.username, + "password": self.password, + "client_properties": {"heartbeat": self.heartbeat}, + } + + class RabbitMQHandler: - def __init__(self, connection_params, tenant_service_url, message_processor): - self.connection_params = connection_params + def __init__( + self, + config: RabbitMQConfig, + tenant_service_url: str, + message_processor: Callable[[Dict[str, Any]], Dict[str, Any]], + ): + self.config = config self.tenant_service_url = tenant_service_url - # TODO: remove hardcoded values - self.input_queue_prefix = "service_request_queue" - self.tenant_exchange_name = "tenants-exchange" - self.service_request_exchange_name = "service_request_exchange" # INPUT - self.service_response_exchange_name = "service_response_exchange" # OUTPUT - self.service_dead_letter_queue_name = "service_dlq" - self.queue_expiration_time = 300000 self.message_processor = message_processor - 
self.connection = None - self.channel = None - self.tenant_exchange = None - self.input_exchange = None - self.output_exchange = None - self.tenant_queues = {} - async def connect(self): - self.connection = await connect_robust(**self.connection_params) + self.connection: AbstractConnection | None = None + self.channel: AbstractChannel | None = None + self.tenant_exchange: AbstractExchange | None = None + self.input_exchange: AbstractExchange | None = None + self.output_exchange: AbstractExchange | None = None + self.tenant_queues: Dict[str, AbstractChannel] = {} + + async def connect(self) -> None: + self.connection = await connect_robust(**self.config.connection_params) self.channel = await self.connection.channel() + await self.channel.set_qos(prefetch_count=1) - # Declare exchanges + async def setup_exchanges(self): self.tenant_exchange = await self.channel.declare_exchange( - self.tenant_exchange_name, ExchangeType.TOPIC, durable=True + self.config.tenant_exchange_name, ExchangeType.TOPIC, durable=True ) self.input_exchange = await self.channel.declare_exchange( - self.service_request_exchange_name, ExchangeType.DIRECT, durable=True + self.config.service_request_exchange_name, ExchangeType.DIRECT, durable=True ) self.output_exchange = await self.channel.declare_exchange( - self.service_response_exchange_name, ExchangeType.DIRECT, durable=True + self.config.service_response_exchange_name, ExchangeType.DIRECT, durable=True ) - async def setup_tenant_queue(self): + async def setup_tenant_queue(self) -> None: queue = await self.channel.declare_queue( "tenant_queue", durable=True, arguments={ "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dead_letter_queue_name, + "x-dead-letter-routing-key": self.config.service_dead_letter_queue_name, }, ) await queue.bind(self.tenant_exchange, routing_key="tenant.*") await queue.consume(self.process_tenant_message) - async def process_tenant_message(self, message: AbstractIncomingMessage): + async def 
process_tenant_message(self, message: AbstractIncomingMessage) -> None: async with message.process(): message_body = json.loads(message.body.decode()) - print(message_body) - logger.debug(f"input message: {message_body}") + logger.debug(f"Tenant message received: {message_body}") tenant_id = message_body["tenantId"] routing_key = message.routing_key @@ -75,72 +98,60 @@ class RabbitMQHandler: elif routing_key == "tenant.delete": await self.delete_tenant_queues(tenant_id) - async def create_tenant_queues(self, tenant_id): - # Create and bind input queue - queue_name = f"{self.input_queue_prefix}_{tenant_id}" - print(f"queue declared: {queue_name}") + async def create_tenant_queues(self, tenant_id: str) -> None: + queue_name = f"{self.config.input_queue_prefix}_{tenant_id}" + logger.info(f"Declaring queue: {queue_name}") input_queue = await self.channel.declare_queue( queue_name, durable=True, arguments={ "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dead_letter_queue_name, - "x-expires": self.queue_expiration_time, + "x-dead-letter-routing-key": self.config.service_dead_letter_queue_name, + "x-expires": self.config.queue_expiration_time, "x-max-priority": 2, }, ) await input_queue.bind(self.input_exchange, routing_key=tenant_id) - await input_queue.consume(lambda msg: self.process_input_message(msg, self.message_processor)) + await input_queue.consume(self.process_input_message) - # Store queues for later use self.tenant_queues[tenant_id] = input_queue - print(f"Created queues for tenant {tenant_id}") + logger.info(f"Created queues for tenant {tenant_id}") - async def delete_tenant_queues(self, tenant_id): + async def delete_tenant_queues(self, tenant_id: str) -> None: if tenant_id in self.tenant_queues: input_queue = self.tenant_queues[tenant_id] await input_queue.delete() del self.tenant_queues[tenant_id] - print(f"Deleted queues for tenant {tenant_id}") + logger.info(f"Deleted queues for tenant {tenant_id}") - async def 
process_input_message( - self, message: IncomingMessage, message_processor: Callable[[Dict[str, Any]], Dict[str, Any]] - ) -> None: + async def process_input_message(self, message: IncomingMessage) -> None: async with message.process(): try: tenant_id = message.routing_key message_body = json.loads(message.body.decode("utf-8")) - # Extract headers filtered_message_headers = ( {k: v for k, v in message.headers.items() if k.lower().startswith("x-")} if message.headers else {} ) logger.debug(f"Processing message with {filtered_message_headers=}.") - # Process the message message_body.update(filtered_message_headers) - # Run the message processor in a separate thread to avoid blocking - loop = asyncio.get_running_loop() - result = await loop.run_in_executor(None, message_processor, message_body) + result = await self.message_processor(message_body) if result: - # Publish the result to the output exchange - self.publish_to_output_exchange(tenant_id, result, filtered_message_headers) + await self.publish_to_output_exchange(tenant_id, result, filtered_message_headers) except json.JSONDecodeError: logger.error(f"Invalid JSON in input message: {message.body}") - # Message will be nacked and sent to dead-letter queue except FileNotFoundError as e: logger.warning(f"{e}, declining message.") - # Message will be nacked and sent to dead-letter queue except Exception as e: logger.error(f"Error processing input message: {e}", exc_info=True) - # Message will be nacked and sent to dead-letter queue raise - async def publish_to_output_exchange(self, tenant_id: str, result: Dict[str, Any], headers: Dict[str, Any]): + async def publish_to_output_exchange(self, tenant_id: str, result: Dict[str, Any], headers: Dict[str, Any]) -> None: await self.output_exchange.publish( Message(body=json.dumps(result).encode(), headers=headers), routing_key=tenant_id, @@ -161,63 +172,26 @@ class RabbitMQHandler: logger.error(f"Error fetching active tenants: {e}") return set() - async def 
initialize_tenant_queues(self): + async def initialize_tenant_queues(self) -> None: active_tenants = await self.fetch_active_tenants() for tenant_id in active_tenants: await self.create_tenant_queues(tenant_id) - async def run(self): - await self.connect() - await self.initialize_tenant_queues() - await self.setup_tenant_queue() - print("RabbitMQ handler is running. Press CTRL+C to exit.") + async def run(self) -> None: try: + await self.connect() + await self.setup_exchanges() + await self.initialize_tenant_queues() + await self.setup_tenant_queue() + logger.info("RabbitMQ handler is running. Press CTRL+C to exit.") await asyncio.Future() # Run forever + except asyncio.CancelledError: + logger.info("Shutting down RabbitMQ handler...") + except Exception as e: + logger.error(f"An error occurred: {e}", exc_info=True) finally: - await self.connection.close() - - -async def main(): - connection_params = { - "host": settings.rabbitmq.host, - "port": settings.rabbitmq.port, - "login": settings.rabbitmq.username, - "password": settings.rabbitmq.password, - "client_properties": {"heartbeat": settings.rabbitmq.heartbeat}, - } - tenant_service_url = "http://localhost:8080/internal/tenants" - handler = RabbitMQHandler(connection_params, tenant_service_url, dummy_message_processor) - await handler.run() - - -######################################################################## -def upload_json_and_make_message_body(tenant_id: str) -> Dict[str, Any]: - dossier_id, file_id, suffix = "dossier", "file", "json.gz" - content = { - "numberOfPages": 7, - "sectionTexts": "data", - } - - object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" - data = gzip.compress(json.dumps(content).encode("utf-8")) - - storage = get_s3_storage_from_settings(settings) - if not storage.has_bucket(): - storage.make_bucket() - storage.put_object(object_name, data) - - message_body = { - "tenantId": tenant_id, - "dossierId": dossier_id, - "fileId": file_id, - "targetFileExtension": suffix, - 
"responseFileExtension": f"result.{suffix}", - } - return message_body - - -def tenant_event_message(tenant_id: str) -> Dict[str, str]: - return {"tenantId": tenant_id} + if self.connection: + await self.connection.close() async def dummy_message_processor(message: Dict[str, Any]) -> Dict[str, Any]: @@ -228,7 +202,6 @@ async def dummy_message_processor(message: Dict[str, Any]) -> Dict[str, Any]: tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(message) suffix = message["responseFileExtension"] - # Simulate processing by modifying the original content object_name = f"{tenant_id}/{dossier_id}/{file_id}.{message['targetFileExtension']}" original_content = json.loads(gzip.decompress(storage.get_object(object_name))) processed_content = { @@ -236,7 +209,6 @@ async def dummy_message_processor(message: Dict[str, Any]) -> Dict[str, Any]: "processedSectionTexts": f"Processed: {original_content['sectionTexts']}", } - # Save processed content processed_object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" processed_data = gzip.compress(json.dumps(processed_content).encode("utf-8")) storage.put_object(processed_object_name, processed_data) @@ -249,40 +221,59 @@ async def dummy_message_processor(message: Dict[str, Any]) -> Dict[str, Any]: return processed_message -async def test_rabbitmq_handler(): - connection_params = { - "host": settings.rabbitmq.host, - "port": settings.rabbitmq.port, - "login": settings.rabbitmq.username, - "password": settings.rabbitmq.password, - "client_properties": {"heartbeat": settings.rabbitmq.heartbeat}, - } - tenant_service_url = "http://localhost:8080/internal/tenants" - handler = RabbitMQHandler(connection_params, tenant_service_url, dummy_message_processor) +async def test_rabbitmq_handler() -> None: + tenant_service_url = settings.storage.tenant_server.endpoint + + config = RabbitMQConfig( + host=settings.rabbitmq.host, + port=settings.rabbitmq.port, + username=settings.rabbitmq.username, + 
password=settings.rabbitmq.password, + heartbeat=settings.rabbitmq.heartbeat, + input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, + tenant_exchange_name=settings.rabbitmq.service_response_queue_prefix, + service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, + service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, + service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, + queue_expiration_time=settings.rabbitmq.queue_expiration_time, + ) + + handler = RabbitMQHandler(config, tenant_service_url, dummy_message_processor) await handler.connect() + await handler.setup_exchanges() await handler.initialize_tenant_queues() await handler.setup_tenant_queue() - # Test tenant creation tenant_id = "test_tenant" - create_message = tenant_event_message(tenant_id) + + # Test tenant creation + create_message = {"tenantId": tenant_id} await handler.tenant_exchange.publish( Message(body=json.dumps(create_message).encode()), routing_key="tenant.create" ) logger.info(f"Sent create tenant message for {tenant_id}") + await asyncio.sleep(2) # Wait for queue creation # Test service request - service_request = upload_json_and_make_message_body(tenant_id) + service_request = { + "tenantId": tenant_id, + "dossierId": "dossier", + "fileId": "file", + "targetFileExtension": "json.gz", + "responseFileExtension": "result.json.gz", + } await handler.input_exchange.publish(Message(body=json.dumps(service_request).encode()), routing_key=tenant_id) logger.info(f"Sent service request for {tenant_id}") + await asyncio.sleep(5) # Wait for message processing # Test tenant deletion - delete_message = tenant_event_message(tenant_id) + delete_message = {"tenantId": tenant_id} await handler.tenant_exchange.publish( Message(body=json.dumps(delete_message).encode()), routing_key="tenant.delete" ) logger.info(f"Sent delete tenant message for {tenant_id}") + await asyncio.sleep(2) # Wait for queue deletion await 
handler.connection.close() From a5162d5bf016972e7b1c6a95b255f5de51ffcae4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Fri, 12 Jul 2024 12:10:31 +0200 Subject: [PATCH 20/35] chore: update poetry deps --- .pre-commit-config.yaml | 50 ++++++++++------- poetry.lock | 116 +++++++++++++++++++--------------------- 2 files changed, 87 insertions(+), 79 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0177c31..6cc462c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,42 +1,54 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks -exclude: ^(docs/|notebooks/|data/|src/secrets/|src/static/|src/templates/|tests) +exclude: ^(docs/|notebooks/|data/|src/configs/|tests/|.hooks/) default_language_version: python: python3.10 repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - exclude: bamboo-specs/bamboo.yml + name: Check Gitlab CI (unsafe) + args: [--unsafe] + files: .gitlab-ci.yml + - id: check-yaml + exclude: .gitlab-ci.yml + - id: check-toml + - id: detect-private-key + - id: check-added-large-files + args: ['--maxkb=10000'] + - id: check-case-conflict + - id: mixed-line-ending - # - repo: https://github.com/pycqa/pylint - # rev: v2.16.1 - # hooks: - # - id: pylint - # args: - # ["--max-line-length=120", "--errors-only", "--ignore-imports=true", ] + - repo: https://github.com/pre-commit/mirrors-pylint + rev: v3.0.0a5 + hooks: + - id: pylint + args: + - --disable=C0111,R0903 + - --max-line-length=120 - repo: https://github.com/pre-commit/mirrors-isort rev: v5.10.1 hooks: - id: isort - args: ["--profile", "black"] + args: + - --profile black - repo: https://github.com/psf/black - rev: 23.12.1 + rev: 24.4.2 hooks: - id: black # exclude: ^(docs/|notebooks/|data/|src/secrets/) args: - --line-length=120 -# - repo: local -# hooks: -# - 
id: system -# name: PyLint -# entry: poetry run pylint -# language: system -# exclude: ^alembic/ -# files: \.py$ + + - repo: https://github.com/compilerla/conventional-pre-commit + rev: v3.2.0 + hooks: + - id: conventional-pre-commit + pass_filenames: false + stages: [commit-msg] + # args: [] # optional: list of Conventional Commits types to allow e.g. [feat, fix, ci, chore, test] diff --git a/poetry.lock b/poetry.lock index 4c2216e..51bcd1a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -279,13 +279,13 @@ tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] [[package]] name = "astroid" -version = "3.2.2" +version = "3.2.3" description = "An abstract syntax tree for Python with inference support." optional = false python-versions = ">=3.8.0" files = [ - {file = "astroid-3.2.2-py3-none-any.whl", hash = "sha256:e8a0083b4bb28fcffb6207a3bfc9e5d0a68be951dd7e336d5dcf639c682388c0"}, - {file = "astroid-3.2.2.tar.gz", hash = "sha256:8ead48e31b92b2e217b6c9733a21afafe479d52d6e164dd25fb1a770c7c3cf94"}, + {file = "astroid-3.2.3-py3-none-any.whl", hash = "sha256:3eae9ea67c11c858cdd2c91337d2e816bd019ac897ca07d7b346ac10105fceb3"}, + {file = "astroid-3.2.3.tar.gz", hash = "sha256:7099b5a60985529d8d46858befa103b82d0d05a5a5e8b816b5303ed96075e1d9"}, ] [package.dependencies] @@ -734,63 +734,63 @@ test = ["pytest"] [[package]] name = "coverage" -version = "7.5.4" +version = "7.6.0" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6cfb5a4f556bb51aba274588200a46e4dd6b505fb1a5f8c5ae408222eb416f99"}, - {file = "coverage-7.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2174e7c23e0a454ffe12267a10732c273243b4f2d50d07544a91198f05c48f47"}, - {file = "coverage-7.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2214ee920787d85db1b6a0bd9da5f8503ccc8fcd5814d90796c2f2493a2f4d2e"}, - {file = 
"coverage-7.5.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1137f46adb28e3813dec8c01fefadcb8c614f33576f672962e323b5128d9a68d"}, - {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b385d49609f8e9efc885790a5a0e89f2e3ae042cdf12958b6034cc442de428d3"}, - {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b4a474f799456e0eb46d78ab07303286a84a3140e9700b9e154cfebc8f527016"}, - {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5cd64adedf3be66f8ccee418473c2916492d53cbafbfcff851cbec5a8454b136"}, - {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e564c2cf45d2f44a9da56f4e3a26b2236504a496eb4cb0ca7221cd4cc7a9aca9"}, - {file = "coverage-7.5.4-cp310-cp310-win32.whl", hash = "sha256:7076b4b3a5f6d2b5d7f1185fde25b1e54eb66e647a1dfef0e2c2bfaf9b4c88c8"}, - {file = "coverage-7.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:018a12985185038a5b2bcafab04ab833a9a0f2c59995b3cec07e10074c78635f"}, - {file = "coverage-7.5.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:db14f552ac38f10758ad14dd7b983dbab424e731588d300c7db25b6f89e335b5"}, - {file = "coverage-7.5.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3257fdd8e574805f27bb5342b77bc65578e98cbc004a92232106344053f319ba"}, - {file = "coverage-7.5.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a6612c99081d8d6134005b1354191e103ec9705d7ba2754e848211ac8cacc6b"}, - {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d45d3cbd94159c468b9b8c5a556e3f6b81a8d1af2a92b77320e887c3e7a5d080"}, - {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed550e7442f278af76d9d65af48069f1fb84c9f745ae249c1a183c1e9d1b025c"}, - {file = 
"coverage-7.5.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a892be37ca35eb5019ec85402c3371b0f7cda5ab5056023a7f13da0961e60da"}, - {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8192794d120167e2a64721d88dbd688584675e86e15d0569599257566dec9bf0"}, - {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:820bc841faa502e727a48311948e0461132a9c8baa42f6b2b84a29ced24cc078"}, - {file = "coverage-7.5.4-cp311-cp311-win32.whl", hash = "sha256:6aae5cce399a0f065da65c7bb1e8abd5c7a3043da9dceb429ebe1b289bc07806"}, - {file = "coverage-7.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:d2e344d6adc8ef81c5a233d3a57b3c7d5181f40e79e05e1c143da143ccb6377d"}, - {file = "coverage-7.5.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:54317c2b806354cbb2dc7ac27e2b93f97096912cc16b18289c5d4e44fc663233"}, - {file = "coverage-7.5.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:042183de01f8b6d531e10c197f7f0315a61e8d805ab29c5f7b51a01d62782747"}, - {file = "coverage-7.5.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6bb74ed465d5fb204b2ec41d79bcd28afccf817de721e8a807d5141c3426638"}, - {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3d45ff86efb129c599a3b287ae2e44c1e281ae0f9a9bad0edc202179bcc3a2e"}, - {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5013ed890dc917cef2c9f765c4c6a8ae9df983cd60dbb635df8ed9f4ebc9f555"}, - {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1014fbf665fef86cdfd6cb5b7371496ce35e4d2a00cda501cf9f5b9e6fced69f"}, - {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3684bc2ff328f935981847082ba4fdc950d58906a40eafa93510d1b54c08a66c"}, - {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:581ea96f92bf71a5ec0974001f900db495488434a6928a2ca7f01eee20c23805"}, - {file = "coverage-7.5.4-cp312-cp312-win32.whl", hash = "sha256:73ca8fbc5bc622e54627314c1a6f1dfdd8db69788f3443e752c215f29fa87a0b"}, - {file = "coverage-7.5.4-cp312-cp312-win_amd64.whl", hash = "sha256:cef4649ec906ea7ea5e9e796e68b987f83fa9a718514fe147f538cfeda76d7a7"}, - {file = "coverage-7.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdd31315fc20868c194130de9ee6bfd99755cc9565edff98ecc12585b90be882"}, - {file = "coverage-7.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:02ff6e898197cc1e9fa375581382b72498eb2e6d5fc0b53f03e496cfee3fac6d"}, - {file = "coverage-7.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d05c16cf4b4c2fc880cb12ba4c9b526e9e5d5bb1d81313d4d732a5b9fe2b9d53"}, - {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5986ee7ea0795a4095ac4d113cbb3448601efca7f158ec7f7087a6c705304e4"}, - {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df54843b88901fdc2f598ac06737f03d71168fd1175728054c8f5a2739ac3e4"}, - {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ab73b35e8d109bffbda9a3e91c64e29fe26e03e49addf5b43d85fc426dde11f9"}, - {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:aea072a941b033813f5e4814541fc265a5c12ed9720daef11ca516aeacd3bd7f"}, - {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:16852febd96acd953b0d55fc842ce2dac1710f26729b31c80b940b9afcd9896f"}, - {file = "coverage-7.5.4-cp38-cp38-win32.whl", hash = "sha256:8f894208794b164e6bd4bba61fc98bf6b06be4d390cf2daacfa6eca0a6d2bb4f"}, - {file = "coverage-7.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:e2afe743289273209c992075a5a4913e8d007d569a406ffed0bd080ea02b0633"}, - {file = "coverage-7.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:b95c3a8cb0463ba9f77383d0fa8c9194cf91f64445a63fc26fb2327e1e1eb088"}, - {file = "coverage-7.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d7564cc09dd91b5a6001754a5b3c6ecc4aba6323baf33a12bd751036c998be4"}, - {file = "coverage-7.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44da56a2589b684813f86d07597fdf8a9c6ce77f58976727329272f5a01f99f7"}, - {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e16f3d6b491c48c5ae726308e6ab1e18ee830b4cdd6913f2d7f77354b33f91c8"}, - {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbc5958cb471e5a5af41b0ddaea96a37e74ed289535e8deca404811f6cb0bc3d"}, - {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a04e990a2a41740b02d6182b498ee9796cf60eefe40cf859b016650147908029"}, - {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ddbd2f9713a79e8e7242d7c51f1929611e991d855f414ca9996c20e44a895f7c"}, - {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b1ccf5e728ccf83acd313c89f07c22d70d6c375a9c6f339233dcf792094bcbf7"}, - {file = "coverage-7.5.4-cp39-cp39-win32.whl", hash = "sha256:56b4eafa21c6c175b3ede004ca12c653a88b6f922494b023aeb1e836df953ace"}, - {file = "coverage-7.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:65e528e2e921ba8fd67d9055e6b9f9e34b21ebd6768ae1c1723f4ea6ace1234d"}, - {file = "coverage-7.5.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:79b356f3dd5b26f3ad23b35c75dbdaf1f9e2450b6bcefc6d0825ea0aa3f86ca5"}, - {file = "coverage-7.5.4.tar.gz", hash = "sha256:a44963520b069e12789d0faea4e9fdb1e410cdc4aab89d94f7f55cbb7fef0353"}, + {file = "coverage-7.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dff044f661f59dace805eedb4a7404c573b6ff0cdba4a524141bc63d7be5c7fd"}, + {file = "coverage-7.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:a8659fd33ee9e6ca03950cfdcdf271d645cf681609153f218826dd9805ab585c"}, + {file = "coverage-7.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7792f0ab20df8071d669d929c75c97fecfa6bcab82c10ee4adb91c7a54055463"}, + {file = "coverage-7.6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4b3cd1ca7cd73d229487fa5caca9e4bc1f0bca96526b922d61053ea751fe791"}, + {file = "coverage-7.6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7e128f85c0b419907d1f38e616c4f1e9f1d1b37a7949f44df9a73d5da5cd53c"}, + {file = "coverage-7.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a94925102c89247530ae1dab7dc02c690942566f22e189cbd53579b0693c0783"}, + {file = "coverage-7.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dcd070b5b585b50e6617e8972f3fbbee786afca71b1936ac06257f7e178f00f6"}, + {file = "coverage-7.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d50a252b23b9b4dfeefc1f663c568a221092cbaded20a05a11665d0dbec9b8fb"}, + {file = "coverage-7.6.0-cp310-cp310-win32.whl", hash = "sha256:0e7b27d04131c46e6894f23a4ae186a6a2207209a05df5b6ad4caee6d54a222c"}, + {file = "coverage-7.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:54dece71673b3187c86226c3ca793c5f891f9fc3d8aa183f2e3653da18566169"}, + {file = "coverage-7.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7b525ab52ce18c57ae232ba6f7010297a87ced82a2383b1afd238849c1ff933"}, + {file = "coverage-7.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bea27c4269234e06f621f3fac3925f56ff34bc14521484b8f66a580aacc2e7d"}, + {file = "coverage-7.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed8d1d1821ba5fc88d4a4f45387b65de52382fa3ef1f0115a4f7a20cdfab0e94"}, + {file = "coverage-7.6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:01c322ef2bbe15057bc4bf132b525b7e3f7206f071799eb8aa6ad1940bcf5fb1"}, + {file = "coverage-7.6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03cafe82c1b32b770a29fd6de923625ccac3185a54a5e66606da26d105f37dac"}, + {file = "coverage-7.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0d1b923fc4a40c5832be4f35a5dab0e5ff89cddf83bb4174499e02ea089daf57"}, + {file = "coverage-7.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4b03741e70fb811d1a9a1d75355cf391f274ed85847f4b78e35459899f57af4d"}, + {file = "coverage-7.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a73d18625f6a8a1cbb11eadc1d03929f9510f4131879288e3f7922097a429f63"}, + {file = "coverage-7.6.0-cp311-cp311-win32.whl", hash = "sha256:65fa405b837060db569a61ec368b74688f429b32fa47a8929a7a2f9b47183713"}, + {file = "coverage-7.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:6379688fb4cfa921ae349c76eb1a9ab26b65f32b03d46bb0eed841fd4cb6afb1"}, + {file = "coverage-7.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f7db0b6ae1f96ae41afe626095149ecd1b212b424626175a6633c2999eaad45b"}, + {file = "coverage-7.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bbdf9a72403110a3bdae77948b8011f644571311c2fb35ee15f0f10a8fc082e8"}, + {file = "coverage-7.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cc44bf0315268e253bf563f3560e6c004efe38f76db03a1558274a6e04bf5d5"}, + {file = "coverage-7.6.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da8549d17489cd52f85a9829d0e1d91059359b3c54a26f28bec2c5d369524807"}, + {file = "coverage-7.6.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0086cd4fc71b7d485ac93ca4239c8f75732c2ae3ba83f6be1c9be59d9e2c6382"}, + {file = "coverage-7.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:1fad32ee9b27350687035cb5fdf9145bc9cf0a094a9577d43e909948ebcfa27b"}, + {file = "coverage-7.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:044a0985a4f25b335882b0966625270a8d9db3d3409ddc49a4eb00b0ef5e8cee"}, + {file = "coverage-7.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:76d5f82213aa78098b9b964ea89de4617e70e0d43e97900c2778a50856dac605"}, + {file = "coverage-7.6.0-cp312-cp312-win32.whl", hash = "sha256:3c59105f8d58ce500f348c5b56163a4113a440dad6daa2294b5052a10db866da"}, + {file = "coverage-7.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:ca5d79cfdae420a1d52bf177de4bc2289c321d6c961ae321503b2ca59c17ae67"}, + {file = "coverage-7.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d39bd10f0ae453554798b125d2f39884290c480f56e8a02ba7a6ed552005243b"}, + {file = "coverage-7.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:beb08e8508e53a568811016e59f3234d29c2583f6b6e28572f0954a6b4f7e03d"}, + {file = "coverage-7.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2e16f4cd2bc4d88ba30ca2d3bbf2f21f00f382cf4e1ce3b1ddc96c634bc48ca"}, + {file = "coverage-7.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6616d1c9bf1e3faea78711ee42a8b972367d82ceae233ec0ac61cc7fec09fa6b"}, + {file = "coverage-7.6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad4567d6c334c46046d1c4c20024de2a1c3abc626817ae21ae3da600f5779b44"}, + {file = "coverage-7.6.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d17c6a415d68cfe1091d3296ba5749d3d8696e42c37fca5d4860c5bf7b729f03"}, + {file = "coverage-7.6.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9146579352d7b5f6412735d0f203bbd8d00113a680b66565e205bc605ef81bc6"}, + {file = "coverage-7.6.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cdab02a0a941af190df8782aafc591ef3ad08824f97850b015c8c6a8b3877b0b"}, + {file = "coverage-7.6.0-cp38-cp38-win32.whl", hash = 
"sha256:df423f351b162a702c053d5dddc0fc0ef9a9e27ea3f449781ace5f906b664428"}, + {file = "coverage-7.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:f2501d60d7497fd55e391f423f965bbe9e650e9ffc3c627d5f0ac516026000b8"}, + {file = "coverage-7.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7221f9ac9dad9492cecab6f676b3eaf9185141539d5c9689d13fd6b0d7de840c"}, + {file = "coverage-7.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ddaaa91bfc4477d2871442bbf30a125e8fe6b05da8a0015507bfbf4718228ab2"}, + {file = "coverage-7.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4cbe651f3904e28f3a55d6f371203049034b4ddbce65a54527a3f189ca3b390"}, + {file = "coverage-7.6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831b476d79408ab6ccfadaaf199906c833f02fdb32c9ab907b1d4aa0713cfa3b"}, + {file = "coverage-7.6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46c3d091059ad0b9c59d1034de74a7f36dcfa7f6d3bde782c49deb42438f2450"}, + {file = "coverage-7.6.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4d5fae0a22dc86259dee66f2cc6c1d3e490c4a1214d7daa2a93d07491c5c04b6"}, + {file = "coverage-7.6.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:07ed352205574aad067482e53dd606926afebcb5590653121063fbf4e2175166"}, + {file = "coverage-7.6.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:49c76cdfa13015c4560702574bad67f0e15ca5a2872c6a125f6327ead2b731dd"}, + {file = "coverage-7.6.0-cp39-cp39-win32.whl", hash = "sha256:482855914928c8175735a2a59c8dc5806cf7d8f032e4820d52e845d1f731dca2"}, + {file = "coverage-7.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:543ef9179bc55edfd895154a51792b01c017c87af0ebaae092720152e19e42ca"}, + {file = "coverage-7.6.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:6fe885135c8a479d3e37a7aae61cbd3a0fb2deccb4dda3c25f92a49189f766d6"}, + {file = "coverage-7.6.0.tar.gz", hash = 
"sha256:289cc803fa1dc901f84701ac10c9ee873619320f2f9aff38794db4a4a0268d51"}, ] [package.extras] @@ -1704,13 +1704,9 @@ files = [ {file = "lxml-5.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196"}, {file = "lxml-5.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b"}, {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6"}, {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c"}, {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184"}, {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f"}, {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61"}, {file = "lxml-5.2.2-cp37-cp37m-win32.whl", hash = "sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f"}, {file = "lxml-5.2.2-cp37-cp37m-win_amd64.whl", hash = 
"sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40"}, From 8844df44cee84a584575ebe3a3d058c1acde7c7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Fri, 12 Jul 2024 12:12:55 +0200 Subject: [PATCH 21/35] feat: add async_v2 --- pyinfra/examples.py | 54 ++++-- pyinfra/queue/async_tenants.py | 152 +++++++++------ .../queue/async_tenants_v2.py | 180 +++++++----------- scripts/send_async_request.py | 153 +++++++++++++++ scripts/send_request.py | 15 +- scripts/send_threaded_request.py | 19 +- 6 files changed, 375 insertions(+), 198 deletions(-) rename scripts/test_async_class.py => pyinfra/queue/async_tenants_v2.py (59%) create mode 100644 scripts/send_async_request.py diff --git a/pyinfra/examples.py b/pyinfra/examples.py index b643e8b..b5c4355 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -3,12 +3,14 @@ import asyncio from dynaconf import Dynaconf from fastapi import FastAPI from kn_utils.logging import logger -from threading import Thread + +# from threading import Thread from pyinfra.config.loader import get_pyinfra_validators, validate_settings + +# from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager +from pyinfra.queue.async_tenants_v2 import RabbitMQConfig, RabbitMQHandler from pyinfra.queue.callback import Callback -from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager -from pyinfra.queue.async_tenants import AsyncQueueManager -from pyinfra.utils.opentelemetry import instrument_pika, setup_trace, instrument_app +from pyinfra.utils.opentelemetry import instrument_app, instrument_pika, setup_trace from pyinfra.webserver.prometheus import ( add_prometheus_endpoint, make_prometheus_processing_time_decorator_from_settings, @@ -19,6 +21,22 @@ from pyinfra.webserver.utils import ( ) +def get_rabbitmq_config(settings: Dynaconf): + return RabbitMQConfig( + host=settings.rabbitmq.host, + port=settings.rabbitmq.port, + 
username=settings.rabbitmq.username, + password=settings.rabbitmq.password, + heartbeat=settings.rabbitmq.heartbeat, + input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, + tenant_exchange_name=settings.rabbitmq.service_response_queue_prefix, + service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, + service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, + service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, + queue_expiration_time=settings.rabbitmq.queue_expiration_time, + ) + + def start_standard_queue_consumer( callback: Callback, settings: Dynaconf, @@ -37,8 +55,8 @@ def start_standard_queue_consumer( app = app or FastAPI() - tenant_manager = TenantQueueManager(settings) - service_manager = ServiceQueueManager(settings) + # tenant_manager = TenantQueueManager(settings) + # service_manager = ServiceQueueManager(settings) if settings.metrics.prometheus.enabled: logger.info("Prometheus metrics enabled.") @@ -52,20 +70,24 @@ def start_standard_queue_consumer( instrument_app(app) # manager = AsyncQueueManager(settings=settings, message_processor=callback) + config = get_rabbitmq_config(settings) + manager = RabbitMQHandler( + config=config, tenant_service_url=settings.storage.tenant_server.endpoint, message_processor=callback + ) - app = add_health_check_endpoint(app, service_manager.is_ready) - # app = add_health_check_endpoint(app, manager.is_ready) + # app = add_health_check_endpoint(app, service_manager.is_ready) + app = add_health_check_endpoint(app, manager.is_ready) webserver_thread = create_webserver_thread_from_settings(app, settings) webserver_thread.start() - thread_t = Thread(target=tenant_manager.start_consuming, daemon=True) - thread_s = Thread(target=service_manager.start_sequential_basic_get, args=(callback,), daemon=True) + # thread_t = Thread(target=tenant_manager.start_consuming, daemon=True) + # thread_s = Thread(target=service_manager.start_sequential_basic_get, 
args=(callback,), daemon=True) - thread_t.start() - thread_s.start() + # thread_t.start() + # thread_s.start() - thread_t.join() - thread_s.join() - - # asyncio.run(manager.start_processing()) \ No newline at end of file + # thread_t.join() + # thread_s.join() + + asyncio.run(manager.run()) diff --git a/pyinfra/queue/async_tenants.py b/pyinfra/queue/async_tenants.py index 17e912e..9bdb3a6 100644 --- a/pyinfra/queue/async_tenants.py +++ b/pyinfra/queue/async_tenants.py @@ -1,28 +1,31 @@ -import aiormq import asyncio -import aio_pika import concurrent.futures -import requests +import datetime import json - -from aio_pika import Message, DeliveryMode -from aio_pika.abc import AbstractIncomingMessage -from dynaconf import Dynaconf +import time +import uuid from typing import Callable, Union +import aio_pika +import aiormq +import requests +from aio_pika import DeliveryMode, Message +from aio_pika.abc import AbstractIncomingMessage +from dynaconf import Dynaconf from kn_utils.logging import logger -from pyinfra.config.loader import validate_settings +from pyinfra.config.loader import ( + load_settings, + local_pyinfra_root_path, + validate_settings, +) from pyinfra.config.validators import queue_manager_validators -from pyinfra.config.loader import load_settings, local_pyinfra_root_path -from pyinfra.queue.callback import make_download_process_upload_callback - MessageProcessor = Callable[[dict], dict] class AsyncQueueManager: - + def __init__(self, settings: Dynaconf, message_processor: Callable = None) -> None: validate_settings(settings, queue_manager_validators) @@ -36,12 +39,12 @@ class AsyncQueueManager: self.connection_sleep = settings.rabbitmq.connection_sleep self.queue_expiration_time = settings.rabbitmq.queue_expiration_time - + self.tenant_created_queue_name = self.get_tenant_created_queue_name(settings) self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) 
self.tenant_exchange_name = settings.rabbitmq.tenant_exchange_name - + self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name self.service_response_exchange_name = settings.rabbitmq.service_response_exchange_name @@ -50,18 +53,16 @@ class AsyncQueueManager: self.service_dlq_name = settings.rabbitmq.service_dlq_name - @staticmethod def get_connection_params(settings: Dynaconf): return { - "host": settings.rabbitmq.host, "port": settings.rabbitmq.port, "login": settings.rabbitmq.username, - "password":settings.rabbitmq.password, - "client_properties": {"heartbeat": settings.rabbitmq.heartbeat} + "password": settings.rabbitmq.password, + "client_properties": {"heartbeat": settings.rabbitmq.heartbeat}, } - + def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> set: response = requests.get(tenant_endpoint_url, timeout=10) response.raise_for_status() # Raise an HTTPError for bad responses @@ -70,7 +71,7 @@ class AsyncQueueManager: tenants = {tenant["tenantId"] for tenant in response.json()} return tenants return set() - + def get_tenant_created_queue_name(self, settings: Dynaconf) -> str: return self.get_queue_name_with_suffix( suffix=settings.rabbitmq.tenant_created_event_queue_suffix, pod_name=settings.kubernetes.pod_name @@ -96,7 +97,7 @@ class AsyncQueueManager: def get_default_queue_name(self): raise NotImplementedError("Queue name method not implemented") - + async def is_ready(self) -> bool: await self.connect() return self.channel.is_open @@ -108,7 +109,9 @@ class AsyncQueueManager: for tenant_id in self.active_tenants: service_request_queue = await self.channel.get_queue(f"{self.service_request_queue_prefix}_{tenant_id}") await service_request_queue.purge() - service_response_queue = await self.channel.get_queue(f"{self.service_response_queue_prefix}_{tenant_id}") + service_response_queue = await self.channel.get_queue( + f"{self.service_response_queue_prefix}_{tenant_id}" + ) await service_response_queue.purge() 
logger.info("Queues purged.") except aio_pika.exceptions.ChannelInvalidStateError: @@ -132,12 +135,15 @@ class AsyncQueueManager: await asyncio.gather(tenant_events, service_events) - async def initialize_queues(self): await self.channel.set_qos(prefetch_count=1) - service_request_exchange = await self.channel.declare_exchange(name=self.service_request_exchange_name, type=aio_pika.ExchangeType.DIRECT) - service_response_exchange = await self.channel.declare_exchange(name=self.service_response_exchange_name, type=aio_pika.ExchangeType.DIRECT) + service_request_exchange = await self.channel.declare_exchange( + name=self.service_request_exchange_name, type=aio_pika.ExchangeType.DIRECT, durable=True + ) + service_response_exchange = await self.channel.declare_exchange( + name=self.service_response_exchange_name, type=aio_pika.ExchangeType.DIRECT, durable=True + ) for tenant_id in self.active_tenants: request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" @@ -164,26 +170,30 @@ class AsyncQueueManager: }, ) await response_queue.bind(exchange=service_response_exchange, routing_key=tenant_id) - + async def handle_tenant_events(self): # Declare the topic exchange for tenant events exchange = await self.channel.declare_exchange( - self.tenant_exchange_name, aio_pika.ExchangeType.TOPIC + self.tenant_exchange_name, aio_pika.ExchangeType.TOPIC, durable=True ) # Declare a queue for receiving tenant events - queue = await self.channel.declare_queue("tenant_events_queue", arguments={ + queue = await self.channel.declare_queue( + "tenant_events_queue", + arguments={ "x-dead-letter-exchange": "", "x-dead-letter-routing-key": self.tenant_events_dlq_name, }, - durable=True,) - + durable=True, + ) + await queue.bind(exchange, routing_key="tenant.*") async with queue.iterator() as queue_iter: async for message in queue_iter: async with message.process(reject_on_redelivered=True): routing_key = message.routing_key - tenant_id = message.body.decode() + message_body = 
json.loads(message.body.decode()) + tenant_id = message_body["tenantId"] if routing_key == "tenant.created": # Handle tenant creation await self.handle_tenant_created(tenant_id) @@ -222,7 +232,7 @@ class AsyncQueueManager: exchange = await self.channel.get_exchange(self.service_request_exchange_name) await queue.bind(exchange=exchange, routing_key=tenant_id) self.active_tenants.add(tenant_id) - + logger.info(f"Created queue for tenant {tenant_id}") async def delete_tenant_queues(self, tenant_id): queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" @@ -245,8 +255,15 @@ class AsyncQueueManager: async def publish_to_service_response_queue(self, tenant_id, result): service_response_exchange = await self.channel.get_exchange(self.service_response_exchange_name) - await service_response_exchange.publish(aio_pika.Message(body=json.dumps(result).encode()), - routing_key=tenant_id,) + await service_response_exchange.publish( + Message( + body=json.dumps(result).encode(), + delivery_mode=DeliveryMode.NOT_PERSISTENT, + timestamp=datetime.datetime.now(), + message_id=str(uuid.uuid4()), + ), + routing_key=tenant_id, + ) async def restart_consumers(self): # Stop current consumers and start new ones for active tenants @@ -284,11 +301,13 @@ class AsyncQueueManager: async def process_message_body_and_await_result(unpacked_message_body): # Processing the message in a separate thread is necessary for the main thread pika client to be able to # process data events (e.g. heartbeats) while the message is being processed. 
- with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor: - logger.info("Processing payload in separate thread.") - future = thread_pool_executor.submit(message_processor, unpacked_message_body) + # with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor: + # logger.info("Processing payload in separate thread.") + # future = thread_pool_executor.submit(message_processor, unpacked_message_body) - return future.result() + # return future.result() + + return {"result": "lovely"} async def on_message_callback(message: AbstractIncomingMessage): logger.info(f"Received message from queue with delivery_tag {message.delivery_tag}.") @@ -312,12 +331,13 @@ class AsyncQueueManager: ) logger.debug(f"Processing message with {filtered_message_headers=}.") result: dict = await ( - process_message_body_and_await_result({**json.loads(message.body), **filtered_message_headers}) or {} + process_message_body_and_await_result({**json.loads(message.body), **filtered_message_headers}) + or {} ) await self.publish_to_service_response_queue(tenant_id, result) logger.info(f"Published result to queue {tenant_id}.") - + await message.ack() logger.debug(f"Message with {message.delivery_tag=} acknowledged.") except FileNotFoundError as e: @@ -330,9 +350,8 @@ class AsyncQueueManager: raise return on_message_callback - - async def publish_message_to_input_queue( - self, tenant_id: str, message: Union[str, bytes, dict]) -> None: + + async def publish_message_to_input_queue(self, tenant_id: str, message: Union[str, bytes, dict]) -> None: if isinstance(message, str): message = message.encode("utf-8") elif isinstance(message, dict): @@ -342,12 +361,19 @@ class AsyncQueueManager: service_request_exchange = await self.channel.get_exchange(self.service_request_exchange_name) - await service_request_exchange.publish(message=Message(body=message, delivery_mode=DeliveryMode.NOT_PERSISTENT), routing_key=tenant_id) + await service_request_exchange.publish( 
+ message=Message( + body=message, + delivery_mode=DeliveryMode.NOT_PERSISTENT, + timestamp=datetime.datetime.now(), + message_id=str(uuid.uuid4()), + ), + routing_key=tenant_id, + ) logger.info(f"Published message to queue {tenant_id}.") - async def publish_message_to_tenant_created_queue( - self, message: Union[str, bytes, dict]) -> None: + async def publish_message_to_tenant_created_queue(self, message: Union[str, bytes, dict]) -> None: if isinstance(message, str): message = message.encode("utf-8") elif isinstance(message, dict): @@ -356,12 +382,19 @@ class AsyncQueueManager: await self.establish_connection() service_request_exchange = await self.channel.get_exchange(self.tenant_exchange_name) - await service_request_exchange.publish(message=Message(body=message, delivery_mode=DeliveryMode.NOT_PERSISTENT), routing_key="tenant.created") + await service_request_exchange.publish( + message=Message( + body=message, + delivery_mode=DeliveryMode.NOT_PERSISTENT, + timestamp=datetime.datetime.now(), + message_id=str(uuid.uuid4()), + ), + routing_key="tenant.created", + ) logger.info(f"Published message to queue {self.tenant_created_queue_name}.") - - async def publish_message_to_tenant_deleted_queue( - self, message: Union[str, bytes, dict]) -> None: + + async def publish_message_to_tenant_deleted_queue(self, message: Union[str, bytes, dict]) -> None: if isinstance(message, str): message = message.encode("utf-8") elif isinstance(message, dict): @@ -370,15 +403,22 @@ class AsyncQueueManager: await self.establish_connection() service_request_exchange = await self.channel.get_exchange(self.tenant_exchange_name) - await service_request_exchange.publish(message=Message(body=message, delivery_mode=DeliveryMode.NOT_PERSISTENT), routing_key="tenant.delete") + await service_request_exchange.publish( + message=Message( + body=message, + delivery_mode=DeliveryMode.NOT_PERSISTENT, + timestamp=datetime.datetime.now(), + message_id=str(uuid.uuid4()), + ), + 
routing_key="tenant.delete", + ) logger.info(f"Published message to queue {self.tenant_deleted_queue_name}.") - - async def main() -> None: import time + settings = load_settings(local_pyinfra_root_path / "config/") callback = "" @@ -386,10 +426,10 @@ async def main() -> None: await manager.main_loop() - while True: time.sleep(100) print("keep idling") -if __name__ == '__main__': - asyncio.run(main()) \ No newline at end of file + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/test_async_class.py b/pyinfra/queue/async_tenants_v2.py similarity index 59% rename from scripts/test_async_class.py rename to pyinfra/queue/async_tenants_v2.py index d3fe7d9..6a16543 100644 --- a/scripts/test_async_class.py +++ b/pyinfra/queue/async_tenants_v2.py @@ -1,17 +1,18 @@ import asyncio -import gzip -from operator import itemgetter -from typing import Any, Callable, Dict, Set -import aiohttp -from aio_pika import connect_robust, ExchangeType, Message, IncomingMessage -from aio_pika.abc import AbstractIncomingMessage, AbstractChannel, AbstractConnection, AbstractExchange import json -from kn_utils.logging import logger -from pyinfra.config.loader import load_settings, local_pyinfra_root_path -from pyinfra.storage.storages.s3 import get_s3_storage_from_settings +import signal from dataclasses import dataclass, field +from typing import Any, Callable, Dict, Set -settings = load_settings(local_pyinfra_root_path / "config/") +import aiohttp +from aio_pika import ExchangeType, IncomingMessage, Message, connect_robust +from aio_pika.abc import ( + AbstractChannel, + AbstractConnection, + AbstractExchange, + AbstractIncomingMessage, +) +from kn_utils.logging import logger @dataclass @@ -63,6 +64,10 @@ class RabbitMQHandler: self.channel = await self.connection.channel() await self.channel.set_qos(prefetch_count=1) + async def is_ready(self) -> bool: + await self.connect() + return self.channel.is_open + async def setup_exchanges(self): self.tenant_exchange = await 
self.channel.declare_exchange( self.config.tenant_exchange_name, ExchangeType.TOPIC, durable=True @@ -75,6 +80,7 @@ class RabbitMQHandler: ) async def setup_tenant_queue(self) -> None: + # TODO: Add k8s pod_name to tenant queue name - add DLQ? queue = await self.channel.declare_queue( "tenant_queue", durable=True, @@ -93,7 +99,7 @@ class RabbitMQHandler: tenant_id = message_body["tenantId"] routing_key = message.routing_key - if routing_key == "tenant.create": + if routing_key == "tenant.created": await self.create_tenant_queues(tenant_id) elif routing_key == "tenant.delete": await self.delete_tenant_queues(tenant_id) @@ -119,16 +125,30 @@ class RabbitMQHandler: async def delete_tenant_queues(self, tenant_id: str) -> None: if tenant_id in self.tenant_queues: - input_queue = self.tenant_queues[tenant_id] - await input_queue.delete() + # somehow queue.delete() does not work here + await self.channel.queue_delete(f"{self.config.input_queue_prefix}_{tenant_id}") del self.tenant_queues[tenant_id] logger.info(f"Deleted queues for tenant {tenant_id}") async def process_input_message(self, message: IncomingMessage) -> None: - async with message.process(): + async def process_message_body_and_await_result(unpacked_message_body): + return self.message_processor(unpacked_message_body) + + async with message.process(ignore_processed=True): + if message.redelivered: + logger.warning(f"Declining message with {message.delivery_tag=} due to it being redelivered.") + await message.nack(requeue=False) + return + + if message.body.decode("utf-8") == "STOP": + logger.info("Received stop signal, stopping consumption...") + await message.ack() + # TODO: shutdown is probably not the right call here - align w/ Dev what should happen on stop signal + await self.shutdown() + return + try: tenant_id = message.routing_key - message_body = json.loads(message.body.decode("utf-8")) filtered_message_headers = ( {k: v for k, v in message.headers.items() if k.lower().startswith("x-")} if 
message.headers else {} @@ -136,18 +156,26 @@ class RabbitMQHandler: logger.debug(f"Processing message with {filtered_message_headers=}.") - message_body.update(filtered_message_headers) - - result = await self.message_processor(message_body) + result: dict = await ( + process_message_body_and_await_result({**json.loads(message.body), **filtered_message_headers}) + or {} + ) if result: await self.publish_to_output_exchange(tenant_id, result, filtered_message_headers) + await message.ack() + logger.debug(f"Message with {message.delivery_tag=} acknowledged.") + else: + raise ValueError(f"Could not process message with {message.body=}.") except json.JSONDecodeError: + await message.nack(requeue=False) logger.error(f"Invalid JSON in input message: {message.body}") except FileNotFoundError as e: - logger.warning(f"{e}, declining message.") + logger.warning(f"{e}, declining message with {message.delivery_tag=}.") + await message.nack(requeue=False) except Exception as e: + await message.nack(requeue=False) logger.error(f"Error processing input message: {e}", exc_info=True) raise @@ -162,6 +190,8 @@ class RabbitMQHandler: try: async with aiohttp.ClientSession() as session: async with session.get(self.tenant_service_url) as response: + # TODO: dont know if we should check for 200, could also be 2xx + # maybe handle bad requests with response.raise_for_status() if response.status == 200: data = await response.json() return {tenant["tenantId"] for tenant in data} @@ -178,105 +208,37 @@ class RabbitMQHandler: await self.create_tenant_queues(tenant_id) async def run(self) -> None: + stop = asyncio.Event() + + def signal_handler(*_): + logger.info("Signal received, shutting down...") + stop.set() + + loop = asyncio.get_running_loop() + for sig in (signal.SIGINT, signal.SIGTERM): + loop.add_signal_handler(sig, signal_handler) + try: await self.connect() await self.setup_exchanges() await self.initialize_tenant_queues() await self.setup_tenant_queue() + logger.info("RabbitMQ 
handler is running. Press CTRL+C to exit.") - await asyncio.Future() # Run forever + await stop.wait() # Run until stop signal received except asyncio.CancelledError: - logger.info("Shutting down RabbitMQ handler...") + logger.info("Operation cancelled.") except Exception as e: logger.error(f"An error occurred: {e}", exc_info=True) finally: - if self.connection: - await self.connection.close() + await self.shutdown() + async def shutdown(self) -> None: + logger.info("Shutting down RabbitMQ handler...") + if self.channel: + await self.channel.close() + if self.connection: + await self.connection.close() + logger.info("RabbitMQ handler shut down successfully.") -async def dummy_message_processor(message: Dict[str, Any]) -> Dict[str, Any]: - logger.info(f"Processing message: {message}") - await asyncio.sleep(1) # Simulate processing time - - storage = get_s3_storage_from_settings(settings) - tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(message) - suffix = message["responseFileExtension"] - - object_name = f"{tenant_id}/{dossier_id}/{file_id}.{message['targetFileExtension']}" - original_content = json.loads(gzip.decompress(storage.get_object(object_name))) - processed_content = { - "processedPages": original_content["numberOfPages"], - "processedSectionTexts": f"Processed: {original_content['sectionTexts']}", - } - - processed_object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" - processed_data = gzip.compress(json.dumps(processed_content).encode("utf-8")) - storage.put_object(processed_object_name, processed_data) - - processed_message = message.copy() - processed_message["processed"] = True - processed_message["processor_message"] = "This message was processed by the dummy processor" - - logger.info(f"Finished processing message. 
Result: {processed_message}") - return processed_message - - -async def test_rabbitmq_handler() -> None: - tenant_service_url = settings.storage.tenant_server.endpoint - - config = RabbitMQConfig( - host=settings.rabbitmq.host, - port=settings.rabbitmq.port, - username=settings.rabbitmq.username, - password=settings.rabbitmq.password, - heartbeat=settings.rabbitmq.heartbeat, - input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, - tenant_exchange_name=settings.rabbitmq.service_response_queue_prefix, - service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, - service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, - service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, - queue_expiration_time=settings.rabbitmq.queue_expiration_time, - ) - - handler = RabbitMQHandler(config, tenant_service_url, dummy_message_processor) - - await handler.connect() - await handler.setup_exchanges() - await handler.initialize_tenant_queues() - await handler.setup_tenant_queue() - - tenant_id = "test_tenant" - - # Test tenant creation - create_message = {"tenantId": tenant_id} - await handler.tenant_exchange.publish( - Message(body=json.dumps(create_message).encode()), routing_key="tenant.create" - ) - logger.info(f"Sent create tenant message for {tenant_id}") - await asyncio.sleep(2) # Wait for queue creation - - # Test service request - service_request = { - "tenantId": tenant_id, - "dossierId": "dossier", - "fileId": "file", - "targetFileExtension": "json.gz", - "responseFileExtension": "result.json.gz", - } - await handler.input_exchange.publish(Message(body=json.dumps(service_request).encode()), routing_key=tenant_id) - logger.info(f"Sent service request for {tenant_id}") - await asyncio.sleep(5) # Wait for message processing - - # Test tenant deletion - delete_message = {"tenantId": tenant_id} - await handler.tenant_exchange.publish( - Message(body=json.dumps(delete_message).encode()), 
routing_key="tenant.delete" - ) - logger.info(f"Sent delete tenant message for {tenant_id}") - await asyncio.sleep(2) # Wait for queue deletion - - await handler.connection.close() - - -if __name__ == "__main__": - asyncio.run(test_rabbitmq_handler()) + # TODO: purge_queues diff --git a/scripts/send_async_request.py b/scripts/send_async_request.py new file mode 100644 index 0000000..1c095d3 --- /dev/null +++ b/scripts/send_async_request.py @@ -0,0 +1,153 @@ +import asyncio +import gzip +import json +from operator import itemgetter +from typing import Any, Dict + +from aio_pika import Message +from aio_pika.abc import AbstractIncomingMessage +from kn_utils.logging import logger + +from pyinfra.config.loader import load_settings, local_pyinfra_root_path +from pyinfra.queue.async_tenants_v2 import RabbitMQConfig, RabbitMQHandler +from pyinfra.storage.storages.s3 import S3Storage, get_s3_storage_from_settings + +settings = load_settings(local_pyinfra_root_path / "config/") + + +async def dummy_message_processor(message: Dict[str, Any]) -> Dict[str, Any]: + logger.info(f"Processing message: {message}") + # await asyncio.sleep(1) # Simulate processing time + + storage = get_s3_storage_from_settings(settings) + tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(message) + suffix = message["responseFileExtension"] + + object_name = f"{tenant_id}/{dossier_id}/{file_id}.{message['targetFileExtension']}" + original_content = json.loads(gzip.decompress(storage.get_object(object_name))) + processed_content = { + "processedPages": original_content["numberOfPages"], + "processedSectionTexts": f"Processed: {original_content['sectionTexts']}", + } + + processed_object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" + processed_data = gzip.compress(json.dumps(processed_content).encode("utf-8")) + storage.put_object(processed_object_name, processed_data) + + processed_message = message.copy() + processed_message["processed"] = True + 
processed_message["processor_message"] = "This message was processed by the dummy processor" + + logger.info(f"Finished processing message. Result: {processed_message}") + return processed_message + + +async def on_response_message_callback(storage: S3Storage): + async def on_message(message: AbstractIncomingMessage) -> None: + async with message.process(ignore_processed=True): + if not message.body: + raise ValueError + response = json.loads(message.body) + logger.info(f"Received {response}") + logger.info(f"Message headers: {message.properties.headers}") + await message.ack() + tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(response) + suffix = response["responseFileExtension"] + result = storage.get_object(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") + result = json.loads(gzip.decompress(result)) + logger.info(f"Contents of result on storage: {result}") + + return on_message + + +def upload_json_and_make_message_body(tenant_id: str): + dossier_id, file_id, suffix = "dossier", "file", "json.gz" + content = { + "numberOfPages": 7, + "sectionTexts": "data", + } + + object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" + data = gzip.compress(json.dumps(content).encode("utf-8")) + + storage = get_s3_storage_from_settings(settings) + if not storage.has_bucket(): + storage.make_bucket() + storage.put_object(object_name, data) + + message_body = { + "tenantId": tenant_id, + "dossierId": dossier_id, + "fileId": file_id, + "targetFileExtension": suffix, + "responseFileExtension": f"result.{suffix}", + } + return message_body, storage + + +async def test_rabbitmq_handler() -> None: + tenant_service_url = settings.storage.tenant_server.endpoint + + config = RabbitMQConfig( + host=settings.rabbitmq.host, + port=settings.rabbitmq.port, + username=settings.rabbitmq.username, + password=settings.rabbitmq.password, + heartbeat=settings.rabbitmq.heartbeat, + input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, + 
tenant_exchange_name=settings.rabbitmq.service_response_queue_prefix, + service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, + service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, + service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, + queue_expiration_time=settings.rabbitmq.queue_expiration_time, + ) + + handler = RabbitMQHandler(config, tenant_service_url, dummy_message_processor) + + await handler.connect() + await handler.setup_exchanges() + # await handler.initialize_tenant_queues() + # await handler.setup_tenant_queue() + + # for queue in handler.tenant_queues.values(): + # await queue.purge() + + tenant_id = "test_tenant" + + # Test tenant creation + create_message = {"tenantId": tenant_id} + await handler.tenant_exchange.publish( + Message(body=json.dumps(create_message).encode()), routing_key="tenant.created" + ) + logger.info(f"Sent create tenant message for {tenant_id}") + await asyncio.sleep(2) # Wait for queue creation + + # Prepare service request + service_request, storage = upload_json_and_make_message_body(tenant_id) + + # Test service request + await handler.input_exchange.publish(Message(body=json.dumps(service_request).encode()), routing_key=tenant_id) + logger.info(f"Sent service request for {tenant_id}") + await asyncio.sleep(5) # Wait for message processing + + # Consume service request + response_queue = await handler.channel.declare_queue(name=f"response_queue_{tenant_id}") + await response_queue.bind(exchange=handler.output_exchange, routing_key=tenant_id) + callback = await on_response_message_callback(storage) + await response_queue.consume(callback=callback) + + await asyncio.sleep(5) # Wait for message processing + + # Test tenant deletion + delete_message = {"tenantId": tenant_id} + await handler.tenant_exchange.publish( + Message(body=json.dumps(delete_message).encode()), routing_key="tenant.delete" + ) + logger.info(f"Sent delete tenant message for {tenant_id}") + 
await asyncio.sleep(2) # Wait for queue deletion + + await handler.connection.close() + + +if __name__ == "__main__": + asyncio.run(test_rabbitmq_handler()) diff --git a/scripts/send_request.py b/scripts/send_request.py index 03d18d3..9fe3f08 100644 --- a/scripts/send_request.py +++ b/scripts/send_request.py @@ -2,14 +2,14 @@ import asyncio import gzip import json import time -from aio_pika.abc import AbstractIncomingMessage from operator import itemgetter + +from aio_pika.abc import AbstractIncomingMessage from kn_utils.logging import logger from pyinfra.config.loader import load_settings, local_pyinfra_root_path from pyinfra.queue.async_tenants import AsyncQueueManager -from pyinfra.storage.storages.s3 import get_s3_storage_from_settings -from pyinfra.storage.storages.s3 import S3Storage +from pyinfra.storage.storages.s3 import S3Storage, get_s3_storage_from_settings settings = load_settings(local_pyinfra_root_path / "config/") @@ -57,6 +57,7 @@ def on_message_callback(storage: S3Storage): result = storage.get_object(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") result = json.loads(gzip.decompress(result)) logger.info(f"Contents of result on storage: {result}") + return on_message @@ -98,9 +99,7 @@ async def send_service_request(queue_manager: AsyncQueueManager, tenant_id: str) if __name__ == "__main__": # tenant_ids = ["a", "b", "c", "d"] - queue_manager = AsyncQueueManager(settings) - - # asyncio.run(send_tenant_event(queue_manager, "test", "create")) - - asyncio.run(send_service_request(queue_manager,"redaction")) + # asyncio.run(send_tenant_event(AsyncQueueManager(settings), "test_1", "create")) + asyncio.run(send_service_request(AsyncQueueManager(settings), "redaction")) + # asyncio.run(consume_service_request(AsyncQueueManager(settings),"redaction")) diff --git a/scripts/send_threaded_request.py b/scripts/send_threaded_request.py index 1150690..ca9e2b9 100644 --- a/scripts/send_threaded_request.py +++ b/scripts/send_threaded_request.py @@ -3,6 +3,7 @@ 
import json import time from operator import itemgetter from threading import Thread + from kn_utils.logging import logger from pyinfra.config.loader import load_settings, local_pyinfra_root_path @@ -67,9 +68,7 @@ def send_service_request(tenant_id: str): storage = get_s3_storage_from_settings(settings) - for method_frame, properties, body in queue_manager.channel.consume( - queue=queue_name, inactivity_timeout=15 - ): + for method_frame, properties, body in queue_manager.channel.consume(queue=queue_name, inactivity_timeout=15): if not body: break response = json.loads(body) @@ -87,13 +86,15 @@ def send_service_request(tenant_id: str): if __name__ == "__main__": - tenant_ids = ["a", "b", "c", "d"] + import uuid - for tenant in tenant_ids: + unique_ids = [str(uuid.uuid4()) for _ in range(100)] + + for tenant in unique_ids: send_tenant_event(tenant_id=tenant, event_type="create") - for tenant in tenant_ids: - send_service_request(tenant_id=tenant) + # for tenant in tenant_ids: + # send_service_request(tenant_id=tenant) - for tenant in tenant_ids: - send_tenant_event(tenant_id=tenant, event_type="delete") + # for tenant in tenant_ids: + # send_tenant_event(tenant_id=tenant, event_type="delete") From 8ac16de0faa61b971cc4b9d01401a7d52d9973e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Fri, 12 Jul 2024 12:23:45 +0200 Subject: [PATCH 22/35] feat: add backwards compatibility --- pyinfra/examples.py | 34 +++++++++++++++---------------- pyinfra/queue/async_tenants_v2.py | 2 +- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/pyinfra/examples.py b/pyinfra/examples.py index b5c4355..199a9cf 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -6,10 +6,11 @@ from kn_utils.logging import logger # from threading import Thread from pyinfra.config.loader import get_pyinfra_validators, validate_settings - -# from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager from pyinfra.queue.async_tenants_v2 
import RabbitMQConfig, RabbitMQHandler from pyinfra.queue.callback import Callback + +# from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager +from pyinfra.queue.manager import QueueManager from pyinfra.utils.opentelemetry import instrument_app, instrument_pika, setup_trace from pyinfra.webserver.prometheus import ( add_prometheus_endpoint, @@ -69,25 +70,22 @@ def start_standard_queue_consumer( instrument_pika() instrument_app(app) - # manager = AsyncQueueManager(settings=settings, message_processor=callback) - config = get_rabbitmq_config(settings) - manager = RabbitMQHandler( - config=config, tenant_service_url=settings.storage.tenant_server.endpoint, message_processor=callback - ) + if settings.multiple_tenants.enabled: + config = get_rabbitmq_config(settings) + manager = RabbitMQHandler( + config=config, tenant_service_url=settings.storage.tenant_server.endpoint, message_processor=callback + ) + else: + manager = QueueManager(settings) - # app = add_health_check_endpoint(app, service_manager.is_ready) app = add_health_check_endpoint(app, manager.is_ready) webserver_thread = create_webserver_thread_from_settings(app, settings) webserver_thread.start() - # thread_t = Thread(target=tenant_manager.start_consuming, daemon=True) - # thread_s = Thread(target=service_manager.start_sequential_basic_get, args=(callback,), daemon=True) - - # thread_t.start() - # thread_s.start() - - # thread_t.join() - # thread_s.join() - - asyncio.run(manager.run()) + if isinstance(manager, RabbitMQHandler): + asyncio.run(manager.run()) + elif isinstance(manager, QueueManager): + manager.start_consuming(callback) + else: + logger.warning(f"Behavior for type {type(manager)} is not defined") diff --git a/pyinfra/queue/async_tenants_v2.py b/pyinfra/queue/async_tenants_v2.py index 6a16543..52f1cff 100644 --- a/pyinfra/queue/async_tenants_v2.py +++ b/pyinfra/queue/async_tenants_v2.py @@ -68,7 +68,7 @@ class RabbitMQHandler: await self.connect() return 
self.channel.is_open - async def setup_exchanges(self): + async def setup_exchanges(self) -> None: self.tenant_exchange = await self.channel.declare_exchange( self.config.tenant_exchange_name, ExchangeType.TOPIC, durable=True ) From 3c3580d3bc6c842380709a1c1619ec237e82c8ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Fri, 12 Jul 2024 12:26:56 +0200 Subject: [PATCH 23/35] feat: add backwards compatibility --- pyinfra/examples.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/pyinfra/examples.py b/pyinfra/examples.py index 199a9cf..9c21d9e 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -4,12 +4,9 @@ from dynaconf import Dynaconf from fastapi import FastAPI from kn_utils.logging import logger -# from threading import Thread from pyinfra.config.loader import get_pyinfra_validators, validate_settings from pyinfra.queue.async_tenants_v2 import RabbitMQConfig, RabbitMQHandler from pyinfra.queue.callback import Callback - -# from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager from pyinfra.queue.manager import QueueManager from pyinfra.utils.opentelemetry import instrument_app, instrument_pika, setup_trace from pyinfra.webserver.prometheus import ( @@ -56,9 +53,6 @@ def start_standard_queue_consumer( app = app or FastAPI() - # tenant_manager = TenantQueueManager(settings) - # service_manager = ServiceQueueManager(settings) - if settings.metrics.prometheus.enabled: logger.info("Prometheus metrics enabled.") app = add_prometheus_endpoint(app) @@ -70,7 +64,7 @@ def start_standard_queue_consumer( instrument_pika() instrument_app(app) - if settings.multiple_tenants.enabled: + if settings.concurrency.enabled: config = get_rabbitmq_config(settings) manager = RabbitMQHandler( config=config, tenant_service_url=settings.storage.tenant_server.endpoint, message_processor=callback From 9c28498d8ae09c56e6d04f3e9cdcb99a24b3140f Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Fri, 12 Jul 2024 15:12:46 +0200 Subject: [PATCH 24/35] feat: rollback testing logic for send_request --- scripts/send_request.py | 92 ++++++++++++----------------------------- 1 file changed, 27 insertions(+), 65 deletions(-) diff --git a/scripts/send_request.py b/scripts/send_request.py index 9fe3f08..d1f1fda 100644 --- a/scripts/send_request.py +++ b/scripts/send_request.py @@ -1,27 +1,24 @@ -import asyncio import gzip import json -import time from operator import itemgetter -from aio_pika.abc import AbstractIncomingMessage from kn_utils.logging import logger from pyinfra.config.loader import load_settings, local_pyinfra_root_path -from pyinfra.queue.async_tenants import AsyncQueueManager -from pyinfra.storage.storages.s3 import S3Storage, get_s3_storage_from_settings +from pyinfra.queue.manager import QueueManager +from pyinfra.storage.storages.s3 import get_s3_storage_from_settings settings = load_settings(local_pyinfra_root_path / "config/") -def upload_json_and_make_message_body(tenant_id: str): +def upload_json_and_make_message_body(): dossier_id, file_id, suffix = "dossier", "file", "json.gz" content = { "numberOfPages": 7, "sectionTexts": "data", } - object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" + object_name = f"{dossier_id}/{file_id}.{suffix}" data = gzip.compress(json.dumps(content).encode("utf-8")) storage = get_s3_storage_from_settings(settings) @@ -30,7 +27,6 @@ def upload_json_and_make_message_body(tenant_id: str): storage.put_object(object_name, data) message_body = { - "tenantId": tenant_id, "dossierId": dossier_id, "fileId": file_id, "targetFileExtension": suffix, @@ -39,67 +35,33 @@ def upload_json_and_make_message_body(tenant_id: str): return message_body -def tenant_event_message(tenant_id: str): - return {"tenantId": tenant_id} +def main(): + queue_manager = QueueManager(settings) + queue_manager.purge_queues() + message = upload_json_and_make_message_body() -def 
on_message_callback(storage: S3Storage): - async def on_message(message: AbstractIncomingMessage) -> None: - async with message.process(): - if not message.body: - raise ValueError - response = json.loads(message.body) - logger.info(f"Received {response}") - logger.info(f"Message headers: {message.properties.headers}") - await message.ack() - tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(response) - suffix = response["responseFileExtension"] - result = storage.get_object(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") - result = json.loads(gzip.decompress(result)) - logger.info(f"Contents of result on storage: {result}") - - return on_message - - -async def send_tenant_event(queue_manager: AsyncQueueManager, tenant_id: str, event_type: str): - await queue_manager.purge_queues() - - message = tenant_event_message(tenant_id) - if event_type == "create": - await queue_manager.publish_message_to_tenant_created_queue(message=message) - elif event_type == "delete": - await queue_manager.publish_message_to_tenant_deleted_queue(message=message) - else: - logger.warning(f"Event type '{event_type}' not known.") - await queue_manager.stop_consumers() - - -async def send_service_request(queue_manager: AsyncQueueManager, tenant_id: str): - request_queue_name = f"{settings.rabbitmq.service_request_queue_prefix}_{tenant_id}" - - await queue_manager.purge_queues() - - message = upload_json_and_make_message_body(tenant_id) - - await queue_manager.publish_message_to_input_queue(tenant_id=tenant_id, message=message) - logger.info(f"Put {message} on {request_queue_name}.") + queue_manager.publish_message_to_input_queue(message) + logger.info(f"Put {message} on {settings.rabbitmq.input_queue}.") storage = get_s3_storage_from_settings(settings) - - response_queue_name = f"{settings.rabbitmq.service_response_queue_prefix}_{tenant_id}" - service_response_queue = await queue_manager.channel.get_queue(name=response_queue_name) - - time.sleep(10) - - callback 
= on_message_callback(storage) - await service_response_queue.consume(callback=callback) - await queue_manager.stop_consumers() + for method_frame, properties, body in queue_manager.channel.consume( + queue=settings.rabbitmq.output_queue, inactivity_timeout=15 + ): + if not body: + break + response = json.loads(body) + logger.info(f"Received {response}") + logger.info(f"Message headers: {properties.headers}") + queue_manager.channel.basic_ack(method_frame.delivery_tag) + dossier_id, file_id = itemgetter("dossierId", "fileId")(response) + suffix = message["responseFileExtension"] + print(f"{dossier_id}/{file_id}.{suffix}") + result = storage.get_object(f"{dossier_id}/{file_id}.{suffix}") + result = json.loads(gzip.decompress(result)) + logger.info(f"Contents of result on storage: {result}") + queue_manager.stop_consuming() if __name__ == "__main__": - # tenant_ids = ["a", "b", "c", "d"] - - # asyncio.run(send_tenant_event(AsyncQueueManager(settings), "test_1", "create")) - - asyncio.run(send_service_request(AsyncQueueManager(settings), "redaction")) - # asyncio.run(consume_service_request(AsyncQueueManager(settings),"redaction")) + main() From 02665a5ef821bd06e9c6e870a233620f9f5728b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Fri, 12 Jul 2024 15:14:13 +0200 Subject: [PATCH 25/35] feat: align async queue manager --- pyinfra/examples.py | 8 +- .../{async_tenants_v2.py => async_manager.py} | 20 +- pyinfra/queue/async_tenants.py | 435 ------------------ scripts/send_async_request.py | 24 +- 4 files changed, 19 insertions(+), 468 deletions(-) rename pyinfra/queue/{async_tenants_v2.py => async_manager.py} (94%) delete mode 100644 pyinfra/queue/async_tenants.py diff --git a/pyinfra/examples.py b/pyinfra/examples.py index 9c21d9e..b1f7f48 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -5,7 +5,7 @@ from fastapi import FastAPI from kn_utils.logging import logger from pyinfra.config.loader import get_pyinfra_validators, 
validate_settings -from pyinfra.queue.async_tenants_v2 import RabbitMQConfig, RabbitMQHandler +from pyinfra.queue.async_manager import AsyncQueueManager, RabbitMQConfig from pyinfra.queue.callback import Callback from pyinfra.queue.manager import QueueManager from pyinfra.utils.opentelemetry import instrument_app, instrument_pika, setup_trace @@ -27,11 +27,13 @@ def get_rabbitmq_config(settings: Dynaconf): password=settings.rabbitmq.password, heartbeat=settings.rabbitmq.heartbeat, input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, + tenant_event_queue_suffix=settings.rabbitmq.tenant_event_queue_suffix, tenant_exchange_name=settings.rabbitmq.service_response_queue_prefix, service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, queue_expiration_time=settings.rabbitmq.queue_expiration_time, + pod_name=settings.kubernetes.pod_name, ) @@ -66,7 +68,7 @@ def start_standard_queue_consumer( if settings.concurrency.enabled: config = get_rabbitmq_config(settings) - manager = RabbitMQHandler( + manager = AsyncQueueManager( config=config, tenant_service_url=settings.storage.tenant_server.endpoint, message_processor=callback ) else: @@ -77,7 +79,7 @@ def start_standard_queue_consumer( webserver_thread = create_webserver_thread_from_settings(app, settings) webserver_thread.start() - if isinstance(manager, RabbitMQHandler): + if isinstance(manager, AsyncQueueManager): asyncio.run(manager.run()) elif isinstance(manager, QueueManager): manager.start_consuming(callback) diff --git a/pyinfra/queue/async_tenants_v2.py b/pyinfra/queue/async_manager.py similarity index 94% rename from pyinfra/queue/async_tenants_v2.py rename to pyinfra/queue/async_manager.py index 52f1cff..715869e 100644 --- a/pyinfra/queue/async_tenants_v2.py +++ b/pyinfra/queue/async_manager.py @@ -23,11 +23,13 @@ class RabbitMQConfig: 
password: str heartbeat: int input_queue_prefix: str + tenant_event_queue_suffix: str tenant_exchange_name: str service_request_exchange_name: str service_response_exchange_name: str service_dead_letter_queue_name: str queue_expiration_time: int + pod_name: str connection_params: Dict[str, object] = field(init=False) @@ -41,7 +43,7 @@ class RabbitMQConfig: } -class RabbitMQHandler: +class AsyncQueueManager: def __init__( self, config: RabbitMQConfig, @@ -80,9 +82,8 @@ class RabbitMQHandler: ) async def setup_tenant_queue(self) -> None: - # TODO: Add k8s pod_name to tenant queue name - add DLQ? queue = await self.channel.declare_queue( - "tenant_queue", + f"{self.config.pod_name}_{self.config.tenant_event_queue_suffix}", durable=True, arguments={ "x-dead-letter-exchange": "", @@ -190,13 +191,14 @@ class RabbitMQHandler: try: async with aiohttp.ClientSession() as session: async with session.get(self.tenant_service_url) as response: - # TODO: dont know if we should check for 200, could also be 2xx - # maybe handle bad requests with response.raise_for_status() - if response.status == 200: + response.raise_for_status() + if response.headers["content-type"].lower() == "application/json": data = await response.json() return {tenant["tenantId"] for tenant in data} else: - logger.error(f"Failed to fetch active tenants. Status: {response.status}") + logger.error( + f"Failed to fetch active tenants. Content type is not JSON: {response.headers['content-type'].lower()}" + ) return set() except aiohttp.ClientError as e: logger.error(f"Error fetching active tenants: {e}") @@ -227,7 +229,7 @@ class RabbitMQHandler: logger.info("RabbitMQ handler is running. 
Press CTRL+C to exit.") await stop.wait() # Run until stop signal received except asyncio.CancelledError: - logger.info("Operation cancelled.") + logger.warning("Operation cancelled.") except Exception as e: logger.error(f"An error occurred: {e}", exc_info=True) finally: @@ -240,5 +242,3 @@ class RabbitMQHandler: if self.connection: await self.connection.close() logger.info("RabbitMQ handler shut down successfully.") - - # TODO: purge_queues diff --git a/pyinfra/queue/async_tenants.py b/pyinfra/queue/async_tenants.py deleted file mode 100644 index 9bdb3a6..0000000 --- a/pyinfra/queue/async_tenants.py +++ /dev/null @@ -1,435 +0,0 @@ -import asyncio -import concurrent.futures -import datetime -import json -import time -import uuid -from typing import Callable, Union - -import aio_pika -import aiormq -import requests -from aio_pika import DeliveryMode, Message -from aio_pika.abc import AbstractIncomingMessage -from dynaconf import Dynaconf -from kn_utils.logging import logger - -from pyinfra.config.loader import ( - load_settings, - local_pyinfra_root_path, - validate_settings, -) -from pyinfra.config.validators import queue_manager_validators - -MessageProcessor = Callable[[dict], dict] - - -class AsyncQueueManager: - - def __init__(self, settings: Dynaconf, message_processor: Callable = None) -> None: - validate_settings(settings, queue_manager_validators) - - self.message_processor = message_processor - self.connection_params = self.get_connection_params(settings) - self.connection = None - self.channel = None - - self.active_tenants = self.get_initial_tenant_ids(tenant_endpoint_url=settings.storage.tenant_server.endpoint) - self.consumer_tasks = {} - - self.connection_sleep = settings.rabbitmq.connection_sleep - self.queue_expiration_time = settings.rabbitmq.queue_expiration_time - - self.tenant_created_queue_name = self.get_tenant_created_queue_name(settings) - self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) - 
self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) - self.tenant_exchange_name = settings.rabbitmq.tenant_exchange_name - - self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name - self.service_response_exchange_name = settings.rabbitmq.service_response_exchange_name - - self.service_request_queue_prefix = settings.rabbitmq.service_request_queue_prefix - self.service_response_queue_prefix = settings.rabbitmq.service_response_queue_prefix - - self.service_dlq_name = settings.rabbitmq.service_dlq_name - - @staticmethod - def get_connection_params(settings: Dynaconf): - return { - "host": settings.rabbitmq.host, - "port": settings.rabbitmq.port, - "login": settings.rabbitmq.username, - "password": settings.rabbitmq.password, - "client_properties": {"heartbeat": settings.rabbitmq.heartbeat}, - } - - def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> set: - response = requests.get(tenant_endpoint_url, timeout=10) - response.raise_for_status() # Raise an HTTPError for bad responses - - if response.headers["content-type"].lower() == "application/json": - tenants = {tenant["tenantId"] for tenant in response.json()} - return tenants - return set() - - def get_tenant_created_queue_name(self, settings: Dynaconf) -> str: - return self.get_queue_name_with_suffix( - suffix=settings.rabbitmq.tenant_created_event_queue_suffix, pod_name=settings.kubernetes.pod_name - ) - - def get_tenant_deleted_queue_name(self, settings: Dynaconf) -> str: - return self.get_queue_name_with_suffix( - suffix=settings.rabbitmq.tenant_deleted_event_queue_suffix, pod_name=settings.kubernetes.pod_name - ) - - def get_tenant_events_dlq_name(self, settings: Dynaconf) -> str: - return self.get_queue_name_with_suffix( - suffix=settings.rabbitmq.tenant_event_dlq_suffix, pod_name=settings.kubernetes.pod_name - ) - - def get_queue_name_with_suffix(self, suffix: str, pod_name: str) -> str: - if not self.use_default_queue_name() and pod_name: - return 
f"{pod_name}{suffix}" - return self.get_default_queue_name() - - def use_default_queue_name(self) -> bool: - return False - - def get_default_queue_name(self): - raise NotImplementedError("Queue name method not implemented") - - async def is_ready(self) -> bool: - await self.connect() - return self.channel.is_open - - #### ASYNC STUFF - async def purge_queues(self) -> None: - await self.establish_connection() - try: - for tenant_id in self.active_tenants: - service_request_queue = await self.channel.get_queue(f"{self.service_request_queue_prefix}_{tenant_id}") - await service_request_queue.purge() - service_response_queue = await self.channel.get_queue( - f"{self.service_response_queue_prefix}_{tenant_id}" - ) - await service_response_queue.purge() - logger.info("Queues purged.") - except aio_pika.exceptions.ChannelInvalidStateError: - pass - - async def connect(self): - self.connection = await aio_pika.connect_robust(**self.connection_params) - self.channel = await self.connection.channel() - logger.info("Connection established.") - - async def establish_connection(self): - await self.connect() - await self.initialize_queues() - logger.info("Queues initialized.") - # await self.start_processing() - - async def start_processing(self): - await self.establish_connection() - tenant_events = asyncio.create_task(self.handle_tenant_events()) - service_events = asyncio.create_task(self.start_consumers()) - - await asyncio.gather(tenant_events, service_events) - - async def initialize_queues(self): - await self.channel.set_qos(prefetch_count=1) - - service_request_exchange = await self.channel.declare_exchange( - name=self.service_request_exchange_name, type=aio_pika.ExchangeType.DIRECT, durable=True - ) - service_response_exchange = await self.channel.declare_exchange( - name=self.service_response_exchange_name, type=aio_pika.ExchangeType.DIRECT, durable=True - ) - - for tenant_id in self.active_tenants: - request_queue_name = 
f"{self.service_request_queue_prefix}_{tenant_id}" - request_queue = await self.channel.declare_queue( - name=request_queue_name, - durable=True, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dlq_name, - "x-expires": self.queue_expiration_time, # TODO: check if necessary - "x-max-priority": 2, - }, - ) - await request_queue.bind(exchange=service_request_exchange, routing_key=tenant_id) - - response_queue_name = f"{self.service_response_queue_prefix}_{tenant_id}" - response_queue = await self.channel.declare_queue( - name=response_queue_name, - durable=True, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dlq_name, - "x-expires": self.queue_expiration_time, # TODO: check if necessary - }, - ) - await response_queue.bind(exchange=service_response_exchange, routing_key=tenant_id) - - async def handle_tenant_events(self): - # Declare the topic exchange for tenant events - exchange = await self.channel.declare_exchange( - self.tenant_exchange_name, aio_pika.ExchangeType.TOPIC, durable=True - ) - # Declare a queue for receiving tenant events - queue = await self.channel.declare_queue( - "tenant_events_queue", - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.tenant_events_dlq_name, - }, - durable=True, - ) - - await queue.bind(exchange, routing_key="tenant.*") - - async with queue.iterator() as queue_iter: - async for message in queue_iter: - async with message.process(reject_on_redelivered=True): - routing_key = message.routing_key - message_body = json.loads(message.body.decode()) - tenant_id = message_body["tenantId"] - if routing_key == "tenant.created": - # Handle tenant creation - await self.handle_tenant_created(tenant_id) - - elif routing_key == "tenant.deleted": - # Handle tenant deletion - await self.handle_tenant_deleted(tenant_id) - else: - message.nack() - continue - message.ack() - await self.restart_consumers() - - async def 
handle_tenant_created(self, tenant_id): - # Handle creation of input and output queues for the new tenant - await self.create_tenant_queues(tenant_id) - await self.restart_consumers() - - async def handle_tenant_deleted(self, tenant_id): - # Handle deletion of input and output queues for the tenant - await self.delete_tenant_queues(tenant_id) - await self.restart_consumers() - - async def create_tenant_queues(self, tenant_id): - # Implement queue creation logic for the tenant - queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" - queue = await self.channel.declare_queue( - name=queue_name, - durable=True, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dlq_name, - "x-expires": self.queue_expiration_time, # TODO: check if necessary - }, - ) - exchange = await self.channel.get_exchange(self.service_request_exchange_name) - await queue.bind(exchange=exchange, routing_key=tenant_id) - self.active_tenants.add(tenant_id) - logger.info(f"Created queue for tenant {tenant_id}") - - async def delete_tenant_queues(self, tenant_id): - queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" - queue = await self.channel.get_queue(queue_name) - exchange = await self.channel.get_exchange(self.service_request_exchange_name) - await queue.unbind(exchange=exchange, routing_key=tenant_id) - await self.channel.queue_delete(queue_name) - self.active_tenants.discard(tenant_id) - - async def consume_from_request_queue(self, tenant_id): - queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" - queue = await self.channel.get_queue(queue_name) - - async with queue.iterator() as queue_iter: - async for message in queue_iter: - async with message.process(): - on_message_callback = await self._make_on_message_callback(self.message_processor, tenant_id) - await on_message_callback(message) - - async def publish_to_service_response_queue(self, tenant_id, result): - service_response_exchange = await 
self.channel.get_exchange(self.service_response_exchange_name) - - await service_response_exchange.publish( - Message( - body=json.dumps(result).encode(), - delivery_mode=DeliveryMode.NOT_PERSISTENT, - timestamp=datetime.datetime.now(), - message_id=str(uuid.uuid4()), - ), - routing_key=tenant_id, - ) - - async def restart_consumers(self): - # Stop current consumers and start new ones for active tenants - await self.stop_consumers() - await self.start_consumers() - - async def start_consumers(self): - # Start consuming messages from input queues for active tenants - for tenant_id in self.active_tenants: - if tenant_id not in self.consumer_tasks: - self.consumer_tasks[tenant_id] = asyncio.create_task(self.consume_from_request_queue(tenant_id)) - - consumer_tasks = [self.consume_from_request_queue(tenant) for tenant in self.active_tenants] - await asyncio.gather(*consumer_tasks) - - async def stop_consumers(self): - for task in self.consumer_tasks.values(): - task.cancel() - try: - await task - except asyncio.CancelledError: - pass - self.consumer_tasks.clear() - - async def main_loop(self): - await self.establish_connection() - - async def shutdown(self): - # Implement cleanup logic - await self.stop_consumers() - if self.connection: - await self.connection.close() - - async def _make_on_message_callback(self, message_processor: MessageProcessor, tenant_id: str) -> Callable: - async def process_message_body_and_await_result(unpacked_message_body): - # Processing the message in a separate thread is necessary for the main thread pika client to be able to - # process data events (e.g. heartbeats) while the message is being processed. 
- # with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor: - # logger.info("Processing payload in separate thread.") - # future = thread_pool_executor.submit(message_processor, unpacked_message_body) - - # return future.result() - - return {"result": "lovely"} - - async def on_message_callback(message: AbstractIncomingMessage): - logger.info(f"Received message from queue with delivery_tag {message.delivery_tag}.") - - if message.redelivered: - logger.warning(f"Declining message with {message.delivery_tag=} due to it being redelivered.") - await message.nack(requeue=False) - return - - if message.body.decode("utf-8") == "STOP": - logger.info("Received stop signal, stopping consuming...") - await message.ack() - await self.stop_consumers() - return - - try: - filtered_message_headers = ( - {k: v for k, v in message.properties.headers.items() if k.lower().startswith("x-")} - if message.properties.headers - else {} - ) - logger.debug(f"Processing message with {filtered_message_headers=}.") - result: dict = await ( - process_message_body_and_await_result({**json.loads(message.body), **filtered_message_headers}) - or {} - ) - - await self.publish_to_service_response_queue(tenant_id, result) - logger.info(f"Published result to queue {tenant_id}.") - - await message.ack() - logger.debug(f"Message with {message.delivery_tag=} acknowledged.") - except FileNotFoundError as e: - logger.warning(f"{e}, declining message with {message.delivery_tag=}.") - await message.nack(requeue=False) - except Exception as e: - logger.warning(f"Failed to process message with {message.delivery_tag=}, declining...", exc_info=True) - logger.warning(e) - await message.nack(requeue=False) - raise - - return on_message_callback - - async def publish_message_to_input_queue(self, tenant_id: str, message: Union[str, bytes, dict]) -> None: - if isinstance(message, str): - message = message.encode("utf-8") - elif isinstance(message, dict): - message = 
json.dumps(message).encode("utf-8") - - await self.establish_connection() - - service_request_exchange = await self.channel.get_exchange(self.service_request_exchange_name) - - await service_request_exchange.publish( - message=Message( - body=message, - delivery_mode=DeliveryMode.NOT_PERSISTENT, - timestamp=datetime.datetime.now(), - message_id=str(uuid.uuid4()), - ), - routing_key=tenant_id, - ) - - logger.info(f"Published message to queue {tenant_id}.") - - async def publish_message_to_tenant_created_queue(self, message: Union[str, bytes, dict]) -> None: - if isinstance(message, str): - message = message.encode("utf-8") - elif isinstance(message, dict): - message = json.dumps(message).encode("utf-8") - - await self.establish_connection() - service_request_exchange = await self.channel.get_exchange(self.tenant_exchange_name) - - await service_request_exchange.publish( - message=Message( - body=message, - delivery_mode=DeliveryMode.NOT_PERSISTENT, - timestamp=datetime.datetime.now(), - message_id=str(uuid.uuid4()), - ), - routing_key="tenant.created", - ) - - logger.info(f"Published message to queue {self.tenant_created_queue_name}.") - - async def publish_message_to_tenant_deleted_queue(self, message: Union[str, bytes, dict]) -> None: - if isinstance(message, str): - message = message.encode("utf-8") - elif isinstance(message, dict): - message = json.dumps(message).encode("utf-8") - - await self.establish_connection() - service_request_exchange = await self.channel.get_exchange(self.tenant_exchange_name) - - await service_request_exchange.publish( - message=Message( - body=message, - delivery_mode=DeliveryMode.NOT_PERSISTENT, - timestamp=datetime.datetime.now(), - message_id=str(uuid.uuid4()), - ), - routing_key="tenant.delete", - ) - - logger.info(f"Published message to queue {self.tenant_deleted_queue_name}.") - - -async def main() -> None: - import time - - settings = load_settings(local_pyinfra_root_path / "config/") - callback = "" - - manager = 
AsyncQueueManager(settings=settings, message_processor=callback) - - await manager.main_loop() - - while True: - time.sleep(100) - print("keep idling") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/scripts/send_async_request.py b/scripts/send_async_request.py index 1c095d3..4b44cf4 100644 --- a/scripts/send_async_request.py +++ b/scripts/send_async_request.py @@ -9,7 +9,8 @@ from aio_pika.abc import AbstractIncomingMessage from kn_utils.logging import logger from pyinfra.config.loader import load_settings, local_pyinfra_root_path -from pyinfra.queue.async_tenants_v2 import RabbitMQConfig, RabbitMQHandler +from pyinfra.examples import get_rabbitmq_config +from pyinfra.queue.async_manager import AsyncQueueManager from pyinfra.storage.storages.s3 import S3Storage, get_s3_storage_from_settings settings = load_settings(local_pyinfra_root_path / "config/") @@ -88,29 +89,12 @@ def upload_json_and_make_message_body(tenant_id: str): async def test_rabbitmq_handler() -> None: tenant_service_url = settings.storage.tenant_server.endpoint - config = RabbitMQConfig( - host=settings.rabbitmq.host, - port=settings.rabbitmq.port, - username=settings.rabbitmq.username, - password=settings.rabbitmq.password, - heartbeat=settings.rabbitmq.heartbeat, - input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, - tenant_exchange_name=settings.rabbitmq.service_response_queue_prefix, - service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, - service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, - service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, - queue_expiration_time=settings.rabbitmq.queue_expiration_time, - ) + config = get_rabbitmq_config(settings) - handler = RabbitMQHandler(config, tenant_service_url, dummy_message_processor) + handler = AsyncQueueManager(config, tenant_service_url, dummy_message_processor) await handler.connect() await handler.setup_exchanges() - # await 
handler.initialize_tenant_queues() - # await handler.setup_tenant_queue() - - # for queue in handler.tenant_queues.values(): - # await queue.purge() tenant_id = "test_tenant" From b8833c7560fa988f83533d1c3590737b6360af98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Wed, 17 Jul 2024 10:51:14 +0200 Subject: [PATCH 26/35] fix: settings mapping --- pyinfra/examples.py | 2 +- scripts/send_threaded_request.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyinfra/examples.py b/pyinfra/examples.py index b1f7f48..6ad1fa8 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -28,7 +28,7 @@ def get_rabbitmq_config(settings: Dynaconf): heartbeat=settings.rabbitmq.heartbeat, input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, tenant_event_queue_suffix=settings.rabbitmq.tenant_event_queue_suffix, - tenant_exchange_name=settings.rabbitmq.service_response_queue_prefix, + tenant_exchange_name=settings.rabbitmq.tenant_exchange_name, service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, diff --git a/scripts/send_threaded_request.py b/scripts/send_threaded_request.py index ca9e2b9..6824838 100644 --- a/scripts/send_threaded_request.py +++ b/scripts/send_threaded_request.py @@ -57,7 +57,7 @@ def send_tenant_event(tenant_id: str, event_type: str): def send_service_request(tenant_id: str): queue_manager = ServiceQueueManager(settings) - queue_name = f"{settings.rabbitmq.service_response_queue_prefix}_{tenant_id}" + queue_name = f"service_response_queue_{tenant_id}" queue_manager.purge_queues() From eeb4c3ce292ab6651806b78ad90b5afde71bb5b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Wed, 17 Jul 2024 11:41:31 +0200 Subject: [PATCH 27/35] fix: add await to is_ready --- pyinfra/queue/async_manager.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/pyinfra/queue/async_manager.py b/pyinfra/queue/async_manager.py index 715869e..abdd38f 100644 --- a/pyinfra/queue/async_manager.py +++ b/pyinfra/queue/async_manager.py @@ -68,7 +68,7 @@ class AsyncQueueManager: async def is_ready(self) -> bool: await self.connect() - return self.channel.is_open + return await self.channel.is_open async def setup_exchanges(self) -> None: self.tenant_exchange = await self.channel.declare_exchange( From 596d4a9bd00c3bb964d6d1b7f8d90e576665fd30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Mon, 22 Jul 2024 16:48:31 +0200 Subject: [PATCH 28/35] feat: add expiration for tenant event queue and retry to tenant api call --- pyinfra/queue/async_manager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyinfra/queue/async_manager.py b/pyinfra/queue/async_manager.py index abdd38f..be7b02d 100644 --- a/pyinfra/queue/async_manager.py +++ b/pyinfra/queue/async_manager.py @@ -13,6 +13,7 @@ from aio_pika.abc import ( AbstractIncomingMessage, ) from kn_utils.logging import logger +from retry import retry @dataclass @@ -88,6 +89,8 @@ class AsyncQueueManager: arguments={ "x-dead-letter-exchange": "", "x-dead-letter-routing-key": self.config.service_dead_letter_queue_name, + "x-expires": self.config.queue_expiration_time, + "x-max-priority": 2, }, ) await queue.bind(self.tenant_exchange, routing_key="tenant.*") @@ -187,6 +190,7 @@ class AsyncQueueManager: ) logger.info(f"Published result to queue {tenant_id}.") + @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=(aiohttp.ClientError)) async def fetch_active_tenants(self) -> Set[str]: try: async with aiohttp.ClientSession() as session: From 28451e8f8f5a3e9507d4c11cf069e7453c7d062b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Mon, 22 Jul 2024 16:54:28 +0200 Subject: [PATCH 29/35] chore: bump pyinfra version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/pyproject.toml b/pyproject.toml index adbd8da..8cbc932 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "pyinfra" -version = "2.3.0" +version = "3.0.0" description = "" authors = ["Team Research "] license = "All rights reseverd" From 1520e96287d83b2d967712681b8916e57f8f7bec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Mon, 22 Jul 2024 16:57:02 +0200 Subject: [PATCH 30/35] refactor: cleanup codebase --- pyinfra/queue/sequential_tenants.py | 343 ---------------------- pyinfra/queue/threaded_tenants.py | 432 ---------------------------- scripts/send_threaded_request.py | 100 ------- 3 files changed, 875 deletions(-) delete mode 100644 pyinfra/queue/sequential_tenants.py delete mode 100644 pyinfra/queue/threaded_tenants.py delete mode 100644 scripts/send_threaded_request.py diff --git a/pyinfra/queue/sequential_tenants.py b/pyinfra/queue/sequential_tenants.py deleted file mode 100644 index b1eb70f..0000000 --- a/pyinfra/queue/sequential_tenants.py +++ /dev/null @@ -1,343 +0,0 @@ -import atexit -import concurrent.futures -import json -import logging -import requests -import signal -import sys -import time -from typing import Callable, Union - -import pika -import pika.exceptions -from dynaconf import Dynaconf -from kn_utils.logging import logger -from pika.adapters.blocking_connection import BlockingChannel, BlockingConnection -from retry import retry - -from pyinfra.config.loader import validate_settings -from pyinfra.config.validators import queue_manager_validators - -logger.set_level("DEBUG") -pika_logger = logging.getLogger("pika") -pika_logger.setLevel(logging.WARNING) # disables non-informative pika log clutter - -MessageProcessor = Callable[[dict], dict] - - -class QueueManager: - def __init__(self, settings: Dynaconf): - validate_settings(settings, queue_manager_validators) - - self.tenant_created_queue_name = self.get_tenant_created_queue_name(settings) - 
self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) - self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) - - self.connection_sleep = settings.rabbitmq.connection_sleep - self.queue_expiration_time = settings.rabbitmq.queue_expiration_time - - self.tenant_exchange_name = settings.rabbitmq.tenant_exchange_name - self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name - self.service_response_exchange_name = settings.rabbitmq.service_response_exchange_name - - self.service_queue_prefix = settings.rabbitmq.service_request_queue_prefix - self.service_dlq_name = settings.rabbitmq.service_dlq_name - - self.connection_parameters = self.create_connection_parameters(settings) - - self.connection: Union[BlockingConnection, None] = None - self.channel: Union[BlockingChannel, None] = None - - self.tenant_ids = self.get_initial_tenant_ids(tenant_endpoint_url=settings.storage.tenant_server.endpoint) - - self._consuming = False - - atexit.register(self.stop_consuming) - signal.signal(signal.SIGTERM, self._handle_stop_signal) - signal.signal(signal.SIGINT, self._handle_stop_signal) - - @staticmethod - def create_connection_parameters(settings: Dynaconf): - credentials = pika.PlainCredentials(username=settings.rabbitmq.username, password=settings.rabbitmq.password) - pika_connection_params = { - "host": settings.rabbitmq.host, - "port": settings.rabbitmq.port, - "credentials": credentials, - "heartbeat": settings.rabbitmq.heartbeat, - } - - return pika.ConnectionParameters(**pika_connection_params) - - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=requests.exceptions.HTTPError) - def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> list: - try: - response = requests.get(tenant_endpoint_url, timeout=10) - response.raise_for_status() # Raise an HTTPError for bad responses - - if response.headers["content-type"].lower() == "application/json": - tenants = [tenant["tenantId"] for 
tenant in response.json()] - else: - logger.warning("Response is not in JSON format.") - except Exception as e: - logger.warning("An unexpected error occurred:", e) - - return tenants - - def get_tenant_created_queue_name(self, settings: Dynaconf): - return self.get_queue_name_with_suffix( - suffix=settings.rabbitmq.tenant_created_event_queue_suffix, pod_name=settings.kubernetes.pod_name - ) - - def get_tenant_deleted_queue_name(self, settings: Dynaconf): - return self.get_queue_name_with_suffix( - suffix=settings.rabbitmq.tenant_deleted_event_queue_suffix, pod_name=settings.kubernetes.pod_name - ) - - def get_tenant_events_dlq_name(self, settings: Dynaconf): - return self.get_queue_name_with_suffix( - suffix=settings.rabbitmq.tenant_event_dlq_suffix, pod_name=settings.kubernetes.pod_name - ) - - def get_queue_name_with_suffix(self, suffix: str, pod_name: str): - if not self.use_default_queue_name() and pod_name: - return f"{pod_name}{suffix}" - return self.get_default_queue_name() - - def use_default_queue_name(self): - return False - - def get_default_queue_name(self): - raise NotImplementedError("Queue name method not implemented") - - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) - def establish_connection(self): - # TODO: set sensible retry parameters - if self.connection and self.connection.is_open: - logger.debug("Connection to RabbitMQ already established.") - return - - logger.info("Establishing connection to RabbitMQ...") - logger.info(self.__class__.__name__) - self.connection = pika.BlockingConnection(parameters=self.connection_parameters) - - logger.debug("Opening channel...") - self.channel = self.connection.channel() - self.channel.basic_qos(prefetch_count=1) - - args = { - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.tenant_events_dlq_name, - } - - ### Declare exchanges for tenants and responses - self.channel.exchange_declare(exchange=self.tenant_exchange_name, exchange_type="topic") - 
self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="direct") - self.channel.exchange_declare(exchange=self.service_response_exchange_name, exchange_type="direct") - - self.channel.queue_declare(self.tenant_created_queue_name, arguments=args, auto_delete=False, durable=True) - self.channel.queue_declare(self.tenant_deleted_queue_name, arguments=args, auto_delete=False, durable=True) - - self.channel.queue_bind( - exchange=self.tenant_exchange_name, queue=self.tenant_created_queue_name, routing_key="tenant.created" - ) - self.channel.queue_bind( - exchange=self.tenant_exchange_name, queue=self.tenant_deleted_queue_name, routing_key="tenant.delete" - ) - - for tenant_id in self.tenant_ids: - queue_name = self.service_queue_prefix + "_" + tenant_id - self.channel.queue_declare( - queue=queue_name, - durable=True, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dlq_name, - "x-expires": self.queue_expiration_time, # TODO: check if necessary - "x-max-priority": 2, - }, - ) - self.channel.queue_bind( - queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id - ) - - logger.info("Connection to RabbitMQ established, channel open.") - - def is_ready(self): - self.establish_connection() - return self.channel.is_open - - @retry(exceptions=pika.exceptions.AMQPConnectionError, tries=3, delay=5, jitter=(1, 3), logger=logger) - def start_sequential_consume(self, message_processor: Callable): - - self.establish_connection() - self._consuming = True - - try: - while self._consuming: - for tenant_id in self.tenant_ids: - queue_name = self.service_queue_prefix + "_" + tenant_id - method_frame, properties, body = self.channel.basic_get(queue_name) - if method_frame: - on_message_callback = self._make_on_message_callback(message_processor, tenant_id) - on_message_callback(self.channel, method_frame, properties, body) - else: - logger.debug("No message returned") - 
time.sleep(self.connection_sleep) - - ### Handle tenant events - self.check_tenant_created_queue() - self.check_tenant_deleted_queue() - - except KeyboardInterrupt: - logger.info("Exiting...") - finally: - self.stop_consuming() - - def check_tenant_created_queue(self): - while True: - method_frame, properties, body = self.channel.basic_get(self.tenant_created_queue_name) - if method_frame: - self.channel.basic_ack(delivery_tag=method_frame.delivery_tag) - message = json.loads(body) - tenant_id = message["tenantId"] - self.on_tenant_created(tenant_id) - else: - logger.debug("No more tenant created events.") - break - - def check_tenant_deleted_queue(self): - while True: - method_frame, properties, body = self.channel.basic_get(self.tenant_deleted_queue_name) - if method_frame: - self.channel.basic_ack(delivery_tag=method_frame.delivery_tag) - message = json.loads(body) - tenant_id = message["tenantId"] - self.on_tenant_deleted(tenant_id) - else: - logger.debug("No more tenant deleted events.") - break - - def on_tenant_created(self, tenant_id: str): - queue_name = self.service_queue_prefix + "_" + tenant_id - self.channel.queue_declare( - queue=queue_name, - durable=True, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dlq_name, - "x-expires": self.queue_expiration_time, # TODO: check if necessary - }, - ) - self.channel.queue_bind(queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) - self.tenant_ids.append(tenant_id) - - def on_tenant_deleted(self, tenant_id: str): - queue_name = self.service_queue_prefix + "_" + tenant_id - self.channel.queue_unbind(queue=queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) - self.channel.queue_delete(queue_name) - self.tenant_ids.remove(tenant_id) - - def stop_consuming(self): - self._consuming = False - if self.channel and self.channel.is_open: - logger.info("Stopping consuming...") - self.channel.stop_consuming() - 
logger.info("Closing channel...") - self.channel.close() - - if self.connection and self.connection.is_open: - logger.info("Closing connection to RabbitMQ...") - self.connection.close() - - def publish_message_to_input_queue( - self, tenant_id: str, message: Union[str, bytes, dict], properties: pika.BasicProperties = None - ): - if isinstance(message, str): - message = message.encode("utf-8") - elif isinstance(message, dict): - message = json.dumps(message).encode("utf-8") - - self.establish_connection() - self.channel.basic_publish( - exchange=self.service_request_exchange_name, - routing_key=tenant_id, - properties=properties, - body=message, - ) - logger.info(f"Published message to queue {tenant_id}.") - - def purge_queues(self): - self.establish_connection() - try: - self.channel.queue_purge(self.tenant_created_queue_name) - self.channel.queue_purge(self.tenant_deleted_queue_name) - for tenant_id in self.tenant_ids: - queue_name = self.service_queue_prefix + "_" + tenant_id - self.channel.queue_purge(queue_name) - logger.info("Queues purged.") - except pika.exceptions.ChannelWrongStateError: - pass - - def get_message_from_output_queue(self, queue: str): - self.establish_connection() - return self.channel.basic_get(queue, auto_ack=True) - - def _make_on_message_callback(self, message_processor: MessageProcessor, tenant_id: str): - def process_message_body_and_await_result(unpacked_message_body): - # Processing the message in a separate thread is necessary for the main thread pika client to be able to - # process data events (e.g. heartbeats) while the message is being processed. 
- with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor: - logger.info("Processing payload in separate thread.") - future = thread_pool_executor.submit(message_processor, unpacked_message_body) - - return future.result() - - def on_message_callback(channel, method, properties, body): - logger.info(f"Received message from queue with delivery_tag {method.delivery_tag}.") - - if method.redelivered: - logger.warning(f"Declining message with {method.delivery_tag=} due to it being redelivered.") - channel.basic_nack(method.delivery_tag, requeue=False) - return - - if body.decode("utf-8") == "STOP": - logger.info(f"Received stop signal, stopping consuming...") - channel.basic_ack(delivery_tag=method.delivery_tag) - self.stop_consuming() - return - - try: - filtered_message_headers = ( - {k: v for k, v in properties.headers.items() if k.lower().startswith("x-")} - if properties.headers - else {} - ) - logger.debug(f"Processing message with {filtered_message_headers=}.") - result: dict = ( - process_message_body_and_await_result({**json.loads(body), **filtered_message_headers}) or {} - ) - - channel.basic_publish( - exchange=self.service_response_exchange_name, - routing_key=tenant_id, - body=json.dumps(result).encode(), - properties=pika.BasicProperties(headers=filtered_message_headers), - ) - logger.info(f"Published result to queue {tenant_id}.") - - channel.basic_ack(delivery_tag=method.delivery_tag) - logger.debug(f"Message with {method.delivery_tag=} acknowledged.") - except FileNotFoundError as e: - logger.warning(f"{e}, declining message with {method.delivery_tag=}.") - channel.basic_nack(method.delivery_tag, requeue=False) - except Exception: - logger.warning(f"Failed to process message with {method.delivery_tag=}, declining...", exc_info=True) - channel.basic_nack(method.delivery_tag, requeue=False) - raise - - return on_message_callback - - def _handle_stop_signal(self, signum, *args, **kwargs): - logger.info(f"Received signal {signum}, 
stopping consuming...") - self.stop_consuming() - sys.exit(0) diff --git a/pyinfra/queue/threaded_tenants.py b/pyinfra/queue/threaded_tenants.py deleted file mode 100644 index 219f3d2..0000000 --- a/pyinfra/queue/threaded_tenants.py +++ /dev/null @@ -1,432 +0,0 @@ -import atexit -import concurrent.futures -import pika -import queue -import json -import logging -import signal -import sys -import requests -import time -import threading -import pika.exceptions -from dynaconf import Dynaconf -from typing import Callable, Union -from pika.adapters.blocking_connection import BlockingChannel, BlockingConnection -from kn_utils.logging import logger -from pika.channel import Channel -from retry import retry - -from pyinfra.config.loader import validate_settings -from pyinfra.config.validators import queue_manager_validators - -logger.set_level("DEBUG") -pika_logger = logging.getLogger("pika") -pika_logger.setLevel(logging.WARNING) # disables non-informative pika log clutter - -MessageProcessor = Callable[[dict], dict] - - -class BaseQueueManager: - tenant_exchange_queue = queue.Queue() - _connection = None - _lock = threading.Lock() - should_stop = threading.Event() - - def __init__(self, settings: Dynaconf): - validate_settings(settings, queue_manager_validators) - - self.connection_parameters = self.create_connection_parameters(settings) - self.connection: Union[BlockingConnection, None] = None - self.channel: Union[BlockingChannel, None] = None - self.connection_sleep = settings.rabbitmq.connection_sleep - self.queue_expiration_time = settings.rabbitmq.queue_expiration_time - self.tenant_exchange_name = settings.rabbitmq.tenant_exchange_name - - atexit.register(self.stop_consuming) - signal.signal(signal.SIGTERM, self._handle_stop_signal) - signal.signal(signal.SIGINT, self._handle_stop_signal) - - @staticmethod - def create_connection_parameters(settings: Dynaconf) -> pika.ConnectionParameters: - credentials = pika.PlainCredentials(username=settings.rabbitmq.username, 
password=settings.rabbitmq.password) - pika_connection_params = { - "host": settings.rabbitmq.host, - "port": settings.rabbitmq.port, - "credentials": credentials, - "heartbeat": settings.rabbitmq.heartbeat, - } - return pika.ConnectionParameters(**pika_connection_params) - - def get_connection(self) -> BlockingConnection: - with self._lock: - if not self._connection or self._connection.is_closed: - self._connection = pika.BlockingConnection(self.connection_parameters) - return self._connection - - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger) - def establish_connection(self) -> None: - logger.info(f"Establishing connection to RabbitMQ for {self.__class__.__name__}...") - self.connection = self.get_connection() - if not self.channel or self.channel.is_closed: - logger.debug("Opening channel...") - self.channel = self.connection.channel() - self.channel.basic_qos(prefetch_count=1) - self.initialize_queues() - logger.info(f"Connection to RabbitMQ established for {self.__class__.__name__}, channel open.") - - def is_ready(self) -> bool: - self.establish_connection() - return self.channel.is_open - - def initialize_queues(self) -> None: - raise NotImplementedError("Subclasses should implement this method") - - def stop_consuming(self) -> None: - if not self.should_stop.is_set(): - self.should_stop.set() - if self.channel and self.channel.is_open: - try: - self.channel.stop_consuming() - self.channel.close() - except Exception as e: - logger.error(f"Error stopping consuming: {e}", exc_info=True) - if self.connection and self.connection.is_open: - try: - self.connection.close() - except Exception as e: - logger.error(f"Error closing connection: {e}", exc_info=True) - - def _handle_stop_signal(self, signum, *args, **kwargs) -> None: - logger.info(f"Received signal {signum}, stopping consuming...") - self.stop_consuming() - sys.exit(0) - - -class TenantQueueManager(BaseQueueManager): - def __init__(self, settings: Dynaconf): - super().__init__(settings) - - 
self.tenant_created_queue_name = self.get_tenant_created_queue_name(settings) - self.tenant_deleted_queue_name = self.get_tenant_deleted_queue_name(settings) - self.tenant_events_dlq_name = self.get_tenant_events_dlq_name(settings) - - def initialize_queues(self) -> None: - self.channel.exchange_declare(exchange=self.tenant_exchange_name, exchange_type="topic", durable=True) - - self.channel.queue_declare( - queue=self.tenant_created_queue_name, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.tenant_events_dlq_name, - }, - durable=True, - ) - self.channel.queue_declare( - queue=self.tenant_deleted_queue_name, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.tenant_events_dlq_name, - }, - durable=True, - ) - self.channel.queue_declare( - queue=self.tenant_events_dlq_name, - durable=True, - ) - - self.channel.queue_bind( - exchange=self.tenant_exchange_name, queue=self.tenant_created_queue_name, routing_key="tenant.created" - ) - self.channel.queue_bind( - exchange=self.tenant_exchange_name, queue=self.tenant_deleted_queue_name, routing_key="tenant.delete" - ) - - @retry(exceptions=pika.exceptions.AMQPConnectionError, tries=3, delay=5, jitter=(1, 3), logger=logger) - def start_consuming(self) -> None: - - try: - self.establish_connection() - self.channel.basic_consume(queue=self.tenant_created_queue_name, on_message_callback=self.on_tenant_created) - self.channel.basic_consume(queue=self.tenant_deleted_queue_name, on_message_callback=self.on_tenant_deleted) - self.channel.start_consuming() - except Exception: - logger.error("An unexpected error occurred while consuming messages. 
Consuming will stop.", exc_info=True) - raise - finally: - self.stop_consuming() - - def get_tenant_created_queue_name(self, settings: Dynaconf) -> str: - return self.get_queue_name_with_suffix( - suffix=settings.rabbitmq.tenant_created_event_queue_suffix, pod_name=settings.kubernetes.pod_name - ) - - def get_tenant_deleted_queue_name(self, settings: Dynaconf) -> str: - return self.get_queue_name_with_suffix( - suffix=settings.rabbitmq.tenant_deleted_event_queue_suffix, pod_name=settings.kubernetes.pod_name - ) - - def get_tenant_events_dlq_name(self, settings: Dynaconf) -> str: - return self.get_queue_name_with_suffix( - suffix=settings.rabbitmq.tenant_event_dlq_suffix, pod_name=settings.kubernetes.pod_name - ) - - def get_queue_name_with_suffix(self, suffix: str, pod_name: str) -> str: - if not self.use_default_queue_name() and pod_name: - return f"{pod_name}{suffix}" - return self.get_default_queue_name() - - def use_default_queue_name(self) -> bool: - return False - - def get_default_queue_name(self): - raise NotImplementedError("Queue name method not implemented") - - def on_tenant_created(self, ch: Channel, method, properties, body) -> None: - logger.info("Received tenant created event") - message = json.loads(body) - ch.basic_ack(delivery_tag=method.delivery_tag) - - tenant_id = message["tenantId"] - self.tenant_exchange_queue.put(("create", tenant_id)) - - def on_tenant_deleted(self, ch: Channel, method, properties, body) -> None: - logger.info("Received tenant deleted event") - message = json.loads(body) - ch.basic_ack(delivery_tag=method.delivery_tag) - - tenant_id = message["tenantId"] - self.tenant_exchange_queue.put(("delete", tenant_id)) - - def purge_queues(self) -> None: - self.establish_connection() - try: - self.channel.queue_purge(self.tenant_created_queue_name) - self.channel.queue_purge(self.tenant_deleted_queue_name) - logger.info("Queues purged.") - except pika.exceptions.ChannelWrongStateError: - pass - - def 
publish_message_to_tenant_created_queue( - self, message: Union[str, bytes, dict], properties: pika.BasicProperties = None - ) -> None: - if isinstance(message, str): - message = message.encode("utf-8") - elif isinstance(message, dict): - message = json.dumps(message).encode("utf-8") - - self.establish_connection() - self.channel.basic_publish( - exchange=self.tenant_exchange_name, - routing_key="tenant.created", - properties=properties, - body=message, - ) - logger.info(f"Published message to queue {self.tenant_created_queue_name}.") - - def publish_message_to_tenant_deleted_queue( - self, message: Union[str, bytes, dict], properties: pika.BasicProperties = None - ) -> None: - if isinstance(message, str): - message = message.encode("utf-8") - elif isinstance(message, dict): - message = json.dumps(message).encode("utf-8") - - self.establish_connection() - self.channel.basic_publish( - exchange=self.tenant_exchange_name, - routing_key="tenant.delete", - properties=properties, - body=message, - ) - logger.info(f"Published message to queue {self.tenant_deleted_queue_name}.") - - -class ServiceQueueManager(BaseQueueManager): - def __init__(self, settings: Dynaconf): - super().__init__(settings) - - self.service_request_exchange_name = settings.rabbitmq.service_request_exchange_name - self.service_response_exchange_name = settings.rabbitmq.service_response_exchange_name - - self.service_request_queue_prefix = settings.rabbitmq.service_request_queue_prefix - - self.service_dlq_name = settings.rabbitmq.service_dlq_name - - self.tenant_ids = self.get_initial_tenant_ids(tenant_endpoint_url=settings.storage.tenant_server.endpoint) - - def initialize_queues(self) -> None: - self.channel.exchange_declare(exchange=self.service_request_exchange_name, exchange_type="direct", durable=True) - self.channel.exchange_declare(exchange=self.service_response_exchange_name, exchange_type="direct", durable=True) - - for tenant_id in self.tenant_ids: - request_queue_name = 
f"{self.service_request_queue_prefix}_{tenant_id}" - self.channel.queue_declare( - queue=request_queue_name, - durable=True, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dlq_name, - "x-expires": self.queue_expiration_time, # TODO: check if necessary - "x-max-priority": 2, - }, - ) - self.channel.queue_bind( - queue=request_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id - ) - - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=(requests.exceptions.HTTPError, requests.exceptions.ConnectionError)) - def get_initial_tenant_ids(self, tenant_endpoint_url: str) -> list: - response = requests.get(tenant_endpoint_url, timeout=10) - response.raise_for_status() # Raise an HTTPError for bad responses - - if response.headers["content-type"].lower() == "application/json": - tenants = [tenant["tenantId"] for tenant in response.json()] - return tenants - return [] - - @retry(exceptions=pika.exceptions.AMQPConnectionError, tries=3, delay=5, jitter=(1, 3), logger=logger) - def start_sequential_basic_get(self, message_processor: Callable) -> None: - - self.establish_connection() - try: - while not self.should_stop.is_set(): - for tenant_id in self.tenant_ids: - queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" - method_frame, properties, body = self.channel.basic_get(queue_name) - if method_frame: - logger.debug("PROCESSING MESSAGE") - on_message_callback = self._make_on_message_callback(message_processor, tenant_id) - on_message_callback(self.channel, method_frame, properties, body) - else: - logger.debug(f"No message returned for queue {queue_name}") - # time.sleep(self.connection_sleep) - time.sleep(0.1) - - ### Handle tenant events - self.check_tenant_exchange() - - - except KeyboardInterrupt: - logger.info("Exiting...") - finally: - self.stop_consuming() - - def check_tenant_exchange(self) -> None: - while not self.tenant_exchange_queue.empty(): - try: - event, tenant = 
self.tenant_exchange_queue.get_nowait() - if event == "create": - self.on_tenant_created(tenant) - elif event == "delete": - self.on_tenant_deleted(tenant) - except queue.Empty: - # time.sleep(self.connection_sleep) - break - - - def publish_message_to_input_queue( - self, tenant_id: str, message: Union[str, bytes, dict], properties: pika.BasicProperties = None - ) -> None: - if isinstance(message, str): - message = message.encode("utf-8") - elif isinstance(message, dict): - message = json.dumps(message).encode("utf-8") - - self.establish_connection() - self.channel.basic_publish( - exchange=self.service_request_exchange_name, - routing_key=tenant_id, - properties=properties, - body=message, - ) - logger.info(f"Published message to queue {tenant_id}.") - - def purge_queues(self) -> None: - self.establish_connection() - try: - for tenant_id in self.tenant_ids: - request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" - self.channel.queue_purge(request_queue_name) - logger.info("Queues purged.") - except pika.exceptions.ChannelWrongStateError: - pass - - def on_tenant_created(self, tenant_id: str) -> None: - request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" - self.channel.queue_declare( - queue=request_queue_name, - durable=True, - arguments={ - "x-dead-letter-exchange": "", - "x-dead-letter-routing-key": self.service_dlq_name, - "x-expires": self.queue_expiration_time, # TODO: check if necessary - }, - ) - self.channel.queue_bind(queue=request_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) - - self.tenant_ids.append(tenant_id) - logger.debug(f"Added tenant {tenant_id}.") - - def on_tenant_deleted(self, tenant_id: str) -> None: - request_queue_name = f"{self.service_request_queue_prefix}_{tenant_id}" - self.channel.queue_unbind(queue=request_queue_name, exchange=self.service_request_exchange_name, routing_key=tenant_id) - self.channel.queue_delete(request_queue_name) - - 
self.tenant_ids.remove(tenant_id) - logger.debug(f"Deleted tenant {tenant_id}.") - - def _make_on_message_callback(self, message_processor: MessageProcessor, tenant_id: str) -> Callable: - def process_message_body_and_await_result(unpacked_message_body): - # Processing the message in a separate thread is necessary for the main thread pika client to be able to - # process data events (e.g. heartbeats) while the message is being processed. - with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor: - logger.info("Processing payload in separate thread.") - future = thread_pool_executor.submit(message_processor, unpacked_message_body) - - return future.result() - - def on_message_callback(channel, method, properties, body): - logger.info(f"Received message from queue with delivery_tag {method.delivery_tag}.") - - if method.redelivered: - logger.warning(f"Declining message with {method.delivery_tag=} due to it being redelivered.") - channel.basic_nack(method.delivery_tag, requeue=False) - return - - if body.decode("utf-8") == "STOP": - logger.info(f"Received stop signal, stopping consuming...") - channel.basic_ack(delivery_tag=method.delivery_tag) - self.stop_consuming() - return - - try: - filtered_message_headers = ( - {k: v for k, v in properties.headers.items() if k.lower().startswith("x-")} - if properties.headers - else {} - ) - logger.debug(f"Processing message with {filtered_message_headers=}.") - result: dict = ( - process_message_body_and_await_result({**json.loads(body), **filtered_message_headers}) or {} - ) - - channel.basic_publish( - exchange=self.service_response_exchange_name, - routing_key=tenant_id, - body=json.dumps(result).encode(), - properties=pika.BasicProperties(headers=filtered_message_headers), - ) - logger.info(f"Published result to queue {tenant_id}.") - - channel.basic_ack(delivery_tag=method.delivery_tag) - logger.debug(f"Message with {method.delivery_tag=} acknowledged.") - except FileNotFoundError as e: - 
logger.warning(f"{e}, declining message with {method.delivery_tag=}.") - channel.basic_nack(method.delivery_tag, requeue=False) - except Exception: - logger.warning(f"Failed to process message with {method.delivery_tag=}, declining...", exc_info=True) - channel.basic_nack(method.delivery_tag, requeue=False) - raise - - return on_message_callback \ No newline at end of file diff --git a/scripts/send_threaded_request.py b/scripts/send_threaded_request.py deleted file mode 100644 index 6824838..0000000 --- a/scripts/send_threaded_request.py +++ /dev/null @@ -1,100 +0,0 @@ -import gzip -import json -import time -from operator import itemgetter -from threading import Thread - -from kn_utils.logging import logger - -from pyinfra.config.loader import load_settings, local_pyinfra_root_path -from pyinfra.queue.threaded_tenants import ServiceQueueManager, TenantQueueManager -from pyinfra.storage.storages.s3 import get_s3_storage_from_settings - -settings = load_settings(local_pyinfra_root_path / "config/") - - -def upload_json_and_make_message_body(tenant_id: str): - dossier_id, file_id, suffix = "dossier", "file", "json.gz" - content = { - "numberOfPages": 7, - "sectionTexts": "data", - } - - object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}" - data = gzip.compress(json.dumps(content).encode("utf-8")) - - storage = get_s3_storage_from_settings(settings) - if not storage.has_bucket(): - storage.make_bucket() - storage.put_object(object_name, data) - - message_body = { - "tenantId": tenant_id, - "dossierId": dossier_id, - "fileId": file_id, - "targetFileExtension": suffix, - "responseFileExtension": f"result.{suffix}", - } - return message_body - - -def tenant_event_message(tenant_id: str): - return {"tenantId": tenant_id} - - -def send_tenant_event(tenant_id: str, event_type: str): - queue_manager = TenantQueueManager(settings) - queue_manager.purge_queues() - message = tenant_event_message(tenant_id) - if event_type == "create": - 
queue_manager.publish_message_to_tenant_created_queue(message=message) - elif event_type == "delete": - queue_manager.publish_message_to_tenant_deleted_queue(message=message) - else: - logger.warning(f"Event type '{event_type}' not known.") - queue_manager.stop_consuming() - - -def send_service_request(tenant_id: str): - queue_manager = ServiceQueueManager(settings) - queue_name = f"service_response_queue_{tenant_id}" - - queue_manager.purge_queues() - - message = upload_json_and_make_message_body(tenant_id) - - queue_manager.publish_message_to_input_queue(tenant_id=tenant_id, message=message) - logger.info(f"Put {message} on {queue_name}.") - - storage = get_s3_storage_from_settings(settings) - - for method_frame, properties, body in queue_manager.channel.consume(queue=queue_name, inactivity_timeout=15): - if not body: - break - response = json.loads(body) - logger.info(f"Received {response}") - logger.info(f"Message headers: {properties.headers}") - queue_manager.channel.basic_ack(method_frame.delivery_tag) - tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(response) - suffix = message["responseFileExtension"] - print(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") - result = storage.get_object(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}") - result = json.loads(gzip.decompress(result)) - logger.info(f"Contents of result on storage: {result}") - break - queue_manager.stop_consuming() - - -if __name__ == "__main__": - import uuid - - unique_ids = [str(uuid.uuid4()) for _ in range(100)] - - for tenant in unique_ids: - send_tenant_event(tenant_id=tenant, event_type="create") - - # for tenant in tenant_ids: - # send_service_request(tenant_id=tenant) - - # for tenant in tenant_ids: - # send_tenant_event(tenant_id=tenant, event_type="delete") From 13d670091ca0e4b397c38af6285d8d97e6e962bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Mon, 22 Jul 2024 17:31:32 +0200 Subject: [PATCH 31/35] chore: update readme --- 
README.md | 71 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index f497131..4933d19 100755 --- a/README.md +++ b/README.md @@ -30,34 +30,47 @@ The following table shows all necessary settings. You can find a preconfigured s bitbucket. These are the complete settings, you only need all if using all features of the service as described in the [complete example](pyinfra/examples.py). -| Environment Variable | Internal / .toml Name | Description | -|--------------------------------------|------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| LOGGING__LEVEL | logging.level | Log level | -| METRICS__PROMETHEUS__ENABLED | metrics.prometheus.enabled | Enable Prometheus metrics collection | -| METRICS__PROMETHEUS__PREFIX | metrics.prometheus.prefix | Prefix for Prometheus metrics (e.g. {product}-{service}) | -| WEBSERVER__HOST | webserver.host | Host of the webserver (offering e.g. /prometheus, /ready and /health endpoints) | -| WEBSERVER__PORT | webserver.port | Port of the webserver | -| RABBITMQ__HOST | rabbitmq.host | Host of the RabbitMQ server | -| RABBITMQ__PORT | rabbitmq.port | Port of the RabbitMQ server | -| RABBITMQ__USERNAME | rabbitmq.username | Username for the RabbitMQ server | -| RABBITMQ__PASSWORD | rabbitmq.password | Password for the RabbitMQ server | -| RABBITMQ__HEARTBEAT | rabbitmq.heartbeat | Heartbeat for the RabbitMQ server | -| RABBITMQ__CONNECTION_SLEEP | rabbitmq.connection_sleep | Sleep time intervals during message processing. 
Has to be a divider of heartbeat, and shouldn't be too big, since only in these intervals queue interactions happen (like receiving new messages) This is also the minimum time the service needs to process a message. | -| RABBITMQ__INPUT_QUEUE | rabbitmq.input_queue | Name of the input queue | -| RABBITMQ__OUTPUT_QUEUE | rabbitmq.output_queue | Name of the output queue | -| RABBITMQ__DEAD_LETTER_QUEUE | rabbitmq.dead_letter_queue | Name of the dead letter queue | -| STORAGE__BACKEND | storage.backend | Storage backend to use (currently only "s3" and "azure" are supported) | -| STORAGE__S3__BUCKET | storage.s3.bucket | Name of the S3 bucket | -| STORAGE__S3__ENDPOINT | storage.s3.endpoint | Endpoint of the S3 server | -| STORAGE__S3__KEY | storage.s3.key | Access key for the S3 server | -| STORAGE__S3__SECRET | storage.s3.secret | Secret key for the S3 server | -| STORAGE__S3__REGION | storage.s3.region | Region of the S3 server | -| STORAGE__AZURE__CONTAINER | storage.azure.container_name | Name of the Azure container | -| STORAGE__AZURE__CONNECTION_STRING | storage.azure.connection_string | Connection string for the Azure server | -| STORAGE__TENANT_SERVER__PUBLIC_KEY | storage.tenant_server.public_key | Public key of the tenant server | -| STORAGE__TENANT_SERVER__ENDPOINT | storage.tenant_server.endpoint | Endpoint of the tenant server | -| TRACING__OPENTELEMETRY__ENDPOINT | tracing.opentelemetry.endpoint | Endpoint to which OpenTelemetry traces are exported -| TRACING__OPENTELEMETRY__SERVICE_NAME | tracing.opentelemetry.service_name | Name of the service as displayed in the traces collected +| Environment Variable | Internal / .toml Name | Description | +| ------------------------------------------ | --------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| LOGGING\_\_LEVEL | logging.level | Log level | +| CONCURRENCY\_\_ENABLED | concurrency.enabled | Enable multi tenant queue mode | +| METRICS\_\_PROMETHEUS\_\_ENABLED | metrics.prometheus.enabled | Enable Prometheus metrics collection | +| METRICS\_\_PROMETHEUS\_\_PREFIX | metrics.prometheus.prefix | Prefix for Prometheus metrics (e.g. {product}-{service}) | +| WEBSERVER\_\_HOST | webserver.host | Host of the webserver (offering e.g. /prometheus, /ready and /health endpoints) | +| WEBSERVER\_\_PORT | webserver.port | Port of the webserver | +| RABBITMQ\_\_HOST | rabbitmq.host | Host of the RabbitMQ server | +| RABBITMQ\_\_PORT | rabbitmq.port | Port of the RabbitMQ server | +| RABBITMQ\_\_USERNAME | rabbitmq.username | Username for the RabbitMQ server | +| RABBITMQ\_\_PASSWORD | rabbitmq.password | Password for the RabbitMQ server | +| RABBITMQ\_\_HEARTBEAT | rabbitmq.heartbeat | Heartbeat for the RabbitMQ server | +| RABBITMQ\_\_CONNECTION_SLEEP | rabbitmq.connection_sleep | Sleep time intervals during message processing. Has to be a divider of heartbeat, and shouldn't be too big, since only in these intervals queue interactions happen (like receiving new messages) This is also the minimum time the service needs to process a message. 
| +| RABBITMQ\_\_INPUT_QUEUE | rabbitmq.input_queue | Name of the input queue in single queue setting | +| RABBITMQ\_\_OUTPUT_QUEUE | rabbitmq.output_queue | Name of the output queue in single queue setting | +| RABBITMQ\_\_DEAD_LETTER_QUEUE | rabbitmq.dead_letter_queue | Name of the dead letter queue in single queue setting | +| RABBITMQ\_\_TENANT_EVENT_QUEUE_SUFFIX | rabbitmq.tenant_event_queue_suffix | Suffix for the tenant event queue in multi tenant/queue setting | +| RABBITMQ\_\_TENANT_EVENT_DLQ_SUFFIX | rabbitmq.tenant_event_dlq_suffix | Suffix for the dead letter queue in multi tenant/queue setting | +| RABBITMQ\_\_TENANT_EXCHANGE_NAME | rabbitmq.tenant_exchange_name | Name of tenant exchange in multi tenant/queue setting | +| RABBITMQ\_\_QUEUE_EXPIRATION_TIME | rabbitmq.queue_expiration_time | Time until queue expiration in multi tenant/queue setting | +| RABBITMQ\_\_SERVICE_REQUEST_QUEUE_PREFIX | rabbitmq.service_request_queue_prefix | Service request queue prefix in multi tenant/queue setting | +| RABBITMQ\_\_SERVICE_REQUEST_EXCHANGE_NAME | rabbitmq.service_request_exchange_name | Service request exchange name in multi tenant/queue setting | +| RABBITMQ\_\_SERVICE_RESPONSE_EXCHANGE_NAME | rabbitmq.service_response_exchange_name | Service response exchange name in multi tenant/queue setting | +| RABBITMQ\_\_SERVICE_DLQ_NAME | rabbitmq.service_dlq_name | Service dead letter queue name in multi tenant/queue setting | +| STORAGE\_\_BACKEND | storage.backend | Storage backend to use (currently only "s3" and "azure" are supported) | +| STORAGE\_\_S3\_\_BUCKET | storage.s3.bucket | Name of the S3 bucket | +| STORAGE\_\_S3\_\_ENDPOINT | storage.s3.endpoint | Endpoint of the S3 server | +| STORAGE\_\_S3\_\_KEY | storage.s3.key | Access key for the S3 server | +| STORAGE\_\_S3\_\_SECRET | storage.s3.secret | Secret key for the S3 server | +| STORAGE\_\_S3\_\_REGION | storage.s3.region | Region of the S3 server | +| STORAGE\_\_AZURE\_\_CONTAINER | 
storage.azure.container_name            | Name of the Azure container                                                                                                                                                                                                                            |
+| STORAGE\_\_AZURE\_\_CONNECTION_STRING      | storage.azure.connection_string         | Connection string for the Azure server                                                                                                                                                                                                                                  |
+| STORAGE\_\_TENANT_SERVER\_\_PUBLIC_KEY     | storage.tenant_server.public_key        | Public key of the tenant server                                                                                                                                                                                                                                         |
+| STORAGE\_\_TENANT_SERVER\_\_ENDPOINT       | storage.tenant_server.endpoint          | Endpoint of the tenant server                                                                                                                                                                                                                                           |
+| TRACING\_\_ENABLED                         | tracing.enabled                         | Enable tracing                                                                                                                                                                                                                                                          |
+| TRACING\_\_TYPE                            | tracing.type                            | Tracing mode - possible values: "opentelemetry", "azure_monitor" (Expects APPLICATIONINSIGHTS_CONNECTION_STRING environment variable.)                                                                                                                                  |
+| TRACING\_\_OPENTELEMETRY\_\_ENDPOINT       | tracing.opentelemetry.endpoint          | Endpoint to which OpenTelemetry traces are exported                                                                                                                                                                                                                     |
+| TRACING\_\_OPENTELEMETRY\_\_SERVICE_NAME   | tracing.opentelemetry.service_name      | Name of the service as displayed in the traces collected                                                                                                                                                                                                                |
+| TRACING\_\_OPENTELEMETRY\_\_EXPORTER       | tracing.opentelemetry.exporter          | Name of exporter                                                                                                                                                                                                                                                        |
+| KUBERNETES\_\_POD_NAME                     | kubernetes.pod_name                     | Service pod name                                                                                                                                                                                                                                                        |
 
 ### OpenTelemetry
 
@@ -115,10 +128,8 @@ callback = make_download_process_upload_callback(processing_function, settings)
 start_standard_queue_consumer(callback, settings) # optionally also pass a fastAPI app object with preconfigured routes
 ```
 
-
 ### AMQP input message:
 
-
 Either use the legacy format with dossierId and fileId as strings or the new format where absolute paths are used.
 All headers beginning with "X-" are forwarded to the message processor, and returned in the response message (e.g.
 "X-TENANT-ID" is used to acquire storage information for the tenant).
From c7e0df758eb91a940563249aa7331a3be7929543 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Tue, 23 Jul 2024 15:42:48 +0200 Subject: [PATCH 32/35] feat: add async health endpoint --- pyinfra/webserver/utils.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/pyinfra/webserver/utils.py b/pyinfra/webserver/utils.py index 710c26a..9a2a438 100644 --- a/pyinfra/webserver/utils.py +++ b/pyinfra/webserver/utils.py @@ -1,3 +1,4 @@ +import inspect import logging import threading from typing import Callable @@ -31,13 +32,23 @@ def add_health_check_endpoint(app: FastAPI, health_function: HealthFunction) -> """Add a health check endpoint to the app. The health function should return True if the service is healthy, and False otherwise. The health function is called when the endpoint is hit. """ + if inspect.iscoroutinefunction(health_function): - @app.get("/health") - @app.get("/ready") - def check_health(): - if health_function(): - return {"status": "OK"}, 200 - else: + @app.get("/health") + @app.get("/ready") + async def async_check_health(): + alive = await health_function() + if alive: + return {"status": "OK"}, 200 + return {"status": "Service Unavailable"}, 503 + + else: + + @app.get("/health") + @app.get("/ready") + def check_health(): + if health_function(): + return {"status": "OK"}, 200 return {"status": "Service Unavailable"}, 503 return app From 23aaaf68b187973b9f440350a094c3345256de8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Tue, 23 Jul 2024 18:34:50 +0200 Subject: [PATCH 33/35] refactor: simplify rabbitmq config --- pyinfra/examples.py | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/pyinfra/examples.py b/pyinfra/examples.py index 6ad1fa8..68abe5e 100644 --- a/pyinfra/examples.py +++ b/pyinfra/examples.py @@ -19,24 +19,6 @@ from pyinfra.webserver.utils import ( ) -def get_rabbitmq_config(settings: Dynaconf): - 
return RabbitMQConfig( - host=settings.rabbitmq.host, - port=settings.rabbitmq.port, - username=settings.rabbitmq.username, - password=settings.rabbitmq.password, - heartbeat=settings.rabbitmq.heartbeat, - input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, - tenant_event_queue_suffix=settings.rabbitmq.tenant_event_queue_suffix, - tenant_exchange_name=settings.rabbitmq.tenant_exchange_name, - service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, - service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, - service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, - queue_expiration_time=settings.rabbitmq.queue_expiration_time, - pod_name=settings.kubernetes.pod_name, - ) - - def start_standard_queue_consumer( callback: Callback, settings: Dynaconf, @@ -67,7 +49,21 @@ def start_standard_queue_consumer( instrument_app(app) if settings.concurrency.enabled: - config = get_rabbitmq_config(settings) + config = RabbitMQConfig( + host=settings.rabbitmq.host, + port=settings.rabbitmq.port, + username=settings.rabbitmq.username, + password=settings.rabbitmq.password, + heartbeat=settings.rabbitmq.heartbeat, + input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, + tenant_event_queue_suffix=settings.rabbitmq.tenant_event_queue_suffix, + tenant_exchange_name=settings.rabbitmq.tenant_exchange_name, + service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, + service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, + service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, + queue_expiration_time=settings.rabbitmq.queue_expiration_time, + pod_name=settings.kubernetes.pod_name, + ) manager = AsyncQueueManager( config=config, tenant_service_url=settings.storage.tenant_server.endpoint, message_processor=callback ) From 66aaeca92801f9cca89b232f42f117265a34e6d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Wed, 24 Jul 
2024 17:28:13 +0200 Subject: [PATCH 34/35] fix: async queue test --- scripts/send_async_request.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/scripts/send_async_request.py b/scripts/send_async_request.py index 4b44cf4..931b2b8 100644 --- a/scripts/send_async_request.py +++ b/scripts/send_async_request.py @@ -9,8 +9,7 @@ from aio_pika.abc import AbstractIncomingMessage from kn_utils.logging import logger from pyinfra.config.loader import load_settings, local_pyinfra_root_path -from pyinfra.examples import get_rabbitmq_config -from pyinfra.queue.async_manager import AsyncQueueManager +from pyinfra.queue.async_manager import AsyncQueueManager, RabbitMQConfig from pyinfra.storage.storages.s3 import S3Storage, get_s3_storage_from_settings settings = load_settings(local_pyinfra_root_path / "config/") @@ -89,7 +88,21 @@ def upload_json_and_make_message_body(tenant_id: str): async def test_rabbitmq_handler() -> None: tenant_service_url = settings.storage.tenant_server.endpoint - config = get_rabbitmq_config(settings) + config = RabbitMQConfig( + host=settings.rabbitmq.host, + port=settings.rabbitmq.port, + username=settings.rabbitmq.username, + password=settings.rabbitmq.password, + heartbeat=settings.rabbitmq.heartbeat, + input_queue_prefix=settings.rabbitmq.service_request_queue_prefix, + tenant_event_queue_suffix=settings.rabbitmq.tenant_event_queue_suffix, + tenant_exchange_name=settings.rabbitmq.tenant_exchange_name, + service_request_exchange_name=settings.rabbitmq.service_request_exchange_name, + service_response_exchange_name=settings.rabbitmq.service_response_exchange_name, + service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name, + queue_expiration_time=settings.rabbitmq.queue_expiration_time, + pod_name=settings.kubernetes.pod_name, + ) handler = AsyncQueueManager(config, tenant_service_url, dummy_message_processor) @@ -104,7 +117,7 @@ async def test_rabbitmq_handler() -> None: 
Message(body=json.dumps(create_message).encode()), routing_key="tenant.created" ) logger.info(f"Sent create tenant message for {tenant_id}") - await asyncio.sleep(2) # Wait for queue creation + await asyncio.sleep(0.5) # Wait for queue creation # Prepare service request service_request, storage = upload_json_and_make_message_body(tenant_id) @@ -128,7 +141,7 @@ async def test_rabbitmq_handler() -> None: Message(body=json.dumps(delete_message).encode()), routing_key="tenant.delete" ) logger.info(f"Sent delete tenant message for {tenant_id}") - await asyncio.sleep(2) # Wait for queue deletion + await asyncio.sleep(0.5) # Wait for queue deletion await handler.connection.close() From 2a2028085e57b81ccb7ca636d269846bf458929b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonathan=20K=C3=B6ssler?= Date: Thu, 25 Jul 2024 14:45:19 +0200 Subject: [PATCH 35/35] feat: add async retry for tenant server calls --- poetry.lock | 17 ++++++++++++- pyinfra/queue/async_manager.py | 46 +++++++++++++++++++++------------- pyproject.toml | 1 + 3 files changed, 45 insertions(+), 19 deletions(-) diff --git a/poetry.lock b/poetry.lock index 51bcd1a..712ed34 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3413,6 +3413,21 @@ anyio = ">=3.4.0,<5" [package.extras] full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] +[[package]] +name = "tenacity" +version = "8.5.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, + {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + [[package]] name = "tomli" version = "2.0.1" @@ -3802,4 +3817,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] 
lock-version = "2.0" python-versions = ">=3.10,<3.11" -content-hash = "5d7e7dddc7b3aca84f5263def609a2dee5c7155b940d54cd4a158bb72b2bf496" +content-hash = "8a0ce0f234721a8db75619f13e108a94d03a7db14e532c4b8799cf96e8927f45" diff --git a/pyinfra/queue/async_manager.py b/pyinfra/queue/async_manager.py index be7b02d..c435ef3 100644 --- a/pyinfra/queue/async_manager.py +++ b/pyinfra/queue/async_manager.py @@ -13,7 +13,12 @@ from aio_pika.abc import ( AbstractIncomingMessage, ) from kn_utils.logging import logger -from retry import retry +from tenacity import ( + retry, + retry_if_exception_type, + stop_after_attempt, + wait_exponential_jitter, +) @dataclass @@ -190,26 +195,31 @@ class AsyncQueueManager: ) logger.info(f"Published result to queue {tenant_id}.") - @retry(tries=3, delay=5, jitter=(1, 3), logger=logger, exceptions=(aiohttp.ClientError)) + @retry( + stop=stop_after_attempt(5), + wait=wait_exponential_jitter(initial=1, max=10), + retry=retry_if_exception_type(aiohttp.ClientResponseError), + reraise=True, + ) async def fetch_active_tenants(self) -> Set[str]: - try: - async with aiohttp.ClientSession() as session: - async with session.get(self.tenant_service_url) as response: - response.raise_for_status() - if response.headers["content-type"].lower() == "application/json": - data = await response.json() - return {tenant["tenantId"] for tenant in data} - else: - logger.error( - f"Failed to fetch active tenants. Content type is not JSON: {response.headers['content-type'].lower()}" - ) - return set() - except aiohttp.ClientError as e: - logger.error(f"Error fetching active tenants: {e}") - return set() + async with aiohttp.ClientSession() as session: + async with session.get(self.tenant_service_url) as response: + response.raise_for_status() + if response.headers["content-type"].lower() == "application/json": + data = await response.json() + return {tenant["tenantId"] for tenant in data} + else: + logger.error( + f"Failed to fetch active tenants. 
Content type is not JSON: {response.headers['content-type'].lower()}" + ) + return set() async def initialize_tenant_queues(self) -> None: - active_tenants = await self.fetch_active_tenants() + try: + active_tenants = await self.fetch_active_tenants() + except aiohttp.ClientResponseError: + logger.warning("API calls to tenant server failed. No tenant queues initialized.") + active_tenants = set() for tenant_id in active_tenants: await self.create_tenant_queues(tenant_id) diff --git a/pyproject.toml b/pyproject.toml index 8cbc932..13504bd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,6 +36,7 @@ wcwidth = "<=0.2.12" azure-monitor-opentelemetry = "^1.6.0" aio-pika = "^9.4.2" aiohttp = "^3.9.5" +tenacity = "^8.5.0" [tool.poetry.group.dev.dependencies] pytest = "^7"