Compare commits

...

315 Commits

Author SHA1 Message Date
Julius Unverfehrt
3ef4246d1e chore: fuzzy pin kn-utils to allow for future updates 2025-01-22 12:36:38 +01:00
Julius Unverfehrt
841c492639 Merge branch 'chore/RES-871-update-callback' into 'master'
feat: BREAKING CHANGE: download callback now forwards all files as bytes

See merge request knecon/research/pyinfra!108
2025-01-16 11:11:59 +01:00
Julius Unverfehrt
ead069d3a7 chore: adjust docstrings 2025-01-16 10:35:06 +01:00
Julius Unverfehrt
044ea6cf0a feat: streamline download to always include the filename of the downloaded file 2025-01-16 10:29:50 +01:00
Julius Unverfehrt
ff7547e2c6 fix: remove faulty import 2025-01-16 10:29:50 +01:00
Julius Unverfehrt
fbf79ef758 chore: regenerate BOM 2025-01-16 10:29:50 +01:00
Julius Unverfehrt
f382887d40 chore: seek and destroy proto in code 2025-01-16 10:29:50 +01:00
Julius Unverfehrt
5c4400aa8b feat: BREAKING CHANGE: download callback now forwards all files as bytes 2025-01-16 10:29:46 +01:00
Jonathan Kössler
5ce66f18a0 Merge branch 'bugfix/RED-10722' into 'master'
fix: dlq init

See merge request knecon/research/pyinfra!109
2025-01-15 10:56:12 +01:00
Jonathan Kössler
ea0c55930a chore: remove test nack 2025-01-15 10:00:50 +01:00
Jonathan Kössler
87f57e2244 fix: dlq init 2025-01-14 16:39:47 +01:00
Jonathan Kössler
3fb8c4e641 fix: do not use groups for packages 2024-12-18 16:33:35 +01:00
Jonathan Kössler
e23f63acf0 Merge branch 'chore/nexus-package-registry' into 'master'
RES-914: move package registry to nexus

See merge request knecon/research/pyinfra!106
2024-11-20 10:02:52 +01:00
Jonathan Kössler
d3fecc518e chore: move integration tests to own subfolder 2024-11-18 17:31:15 +01:00
Jonathan Kössler
341500d463 chore: set lower bound for opentelemetry dependencies 2024-11-18 17:28:11 +01:00
Jonathan Kössler
e002f77fd5 Revert "chore: update opentelemetry for proto v5 support"
This reverts commit 3c6d8f2dcc73b17f329f9cecb8d4d301f848dc1e.
2024-11-18 17:19:37 +01:00
Jonathan Kössler
3c6d8f2dcc chore: update opentelemetry for proto v5 support 2024-11-18 15:14:34 +01:00
Jonathan Kössler
f6d6ba40bb chore: add pytest-cov 2024-11-18 13:57:39 +01:00
Jonathan Kössler
6a0bbad108 ops: update CI 2024-11-18 13:53:11 +01:00
Jonathan Kössler
527a671a75 feat: move package registry to nexus 2024-11-18 13:49:48 +01:00
Jonathan Kössler
cf91189728 Merge branch 'feature/RED-10441' into 'master'
RED-10441: separate queue and webserver shutdown

See merge request knecon/research/pyinfra!105
2024-11-13 17:17:13 +01:00
Jonathan Kössler
61a6d0eeed feat: separate queue and webserver shutdown 2024-11-13 17:02:21 +01:00
Jonathan Kössler
bc0b355ff9 Merge branch 'feature/RED-10441' into 'master'
RED-10441: ensure queue manager shutdown

See merge request knecon/research/pyinfra!104
2024-11-13 16:34:25 +01:00
Jonathan Kössler
235e27b74c chore: bump version 2024-11-13 16:31:48 +01:00
Jonathan Kössler
1540c2894e feat: ensure shutdown of queue manager 2024-11-13 16:30:18 +01:00
Jonathan Kössler
9b60594ce1 Merge branch 'feature/RED-10441' into 'master'
RED-10441: Fix graceful shutdown

See merge request knecon/research/pyinfra!103
2024-11-13 14:48:34 +01:00
Jonathan Kössler
3d3c76b466 chore: bump version 2024-11-13 13:55:15 +01:00
Jonathan Kössler
9d4ec84b49 fix: use signals for graceful shutdown 2024-11-13 13:54:41 +01:00
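
A minimal sketch of signal-based graceful shutdown for an asyncio service, as the fix above describes; the event wiring and the placeholder start/stop steps are illustrative assumptions, not the actual pyinfra code (add_signal_handler is Unix-only).

```python
import asyncio
import signal


async def serve() -> None:
    stop = asyncio.Event()
    loop = asyncio.get_running_loop()
    # Turn SIGINT/SIGTERM into an event instead of letting them kill the process mid-message.
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, stop.set)

    # ... start the queue consumers and the probe webserver here ...
    await stop.wait()  # run until a shutdown signal arrives
    # ... stop consuming, wait for in-flight messages, close connections ...


if __name__ == "__main__":
    asyncio.run(serve())
```
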
Jonathan Kössler
8891249d7a Merge branch 'feature/RED-10441' into 'master'
RED-10441: fix abandoned queues

See merge request knecon/research/pyinfra!102
2024-11-13 09:35:36 +01:00
Jonathan Kössler
e51e5c33eb chore: cleanup 2024-11-12 17:24:57 +01:00
Jonathan Kössler
04c90533b6 refactor: fetch active tenants before start 2024-11-12 17:11:33 +01:00
Jonathan Kössler
86af05c12c feat: add logger to retry 2024-11-12 16:50:23 +01:00
Jonathan Kössler
c6e336cb35 refactor: tenant queues init 2024-11-12 15:55:11 +01:00
Jonathan Kössler
bf6f95f3e0 feat: exit on ClientResponseError 2024-11-12 15:32:11 +01:00
Jonathan Kössler
ed2bd1ec86 refactor: raise error if tenant service is not available 2024-11-12 13:30:21 +01:00
Julius Unverfehrt
9906f68e0a chore: bump versions to enable package rebuild (current package has the wrong hash due to backup issues) 2024-11-11 12:47:27 +01:00
Julius Unverfehrt
0af648d66c fix: rebuild since mia and update rebuild kn_utils 2024-11-08 13:52:08 +01:00
Jonathan Kössler
46dc1fdce4 Merge branch 'feature/RES-809' into 'master'
RES-809: update kn_utils

See merge request knecon/research/pyinfra!101
2024-10-23 18:01:25 +02:00
Jonathan Kössler
bd2f0b9b9a feat: switch out tenacity retry with kn_utils 2024-10-23 16:06:06 +02:00
Jonathan Kössler
131afd7d3e chore: update kn_utils 2024-10-23 16:04:08 +02:00
Jonathan Kössler
98532c60ed Merge branch 'feature/RES-858-fix-graceful-shutdown' into 'master'
RES-858: fix graceful shutdown for unexpected broker disconnects

See merge request knecon/research/pyinfra!100
2024-09-30 09:54:25 +02:00
Jonathan Kössler
45377ba172 feat: improve on close callback and simplify exception handling 2024-09-27 17:11:10 +02:00
Jonathan Kössler
f855224e29 feat: add on close callback 2024-09-27 10:00:41 +02:00
Jonathan Kössler
541219177f feat: add error handling to shutdown logic 2024-09-26 12:28:55 +02:00
Jonathan Kössler
4119a7d7d7 chore: bump version 2024-09-26 11:05:12 +02:00
Jonathan Kössler
e2edfa7260 fix: simplify webserver shutdown 2024-09-26 10:33:05 +02:00
Jonathan Kössler
b70b16c541 Merge branch 'feature/RES-856-test-proto-format' into 'master'
RES-856: add type tests for proto format

See merge request knecon/research/pyinfra!99
2024-09-26 10:07:29 +02:00
Jonathan Kössler
e8d9326e48 chore: rewrite lock and bump version 2024-09-26 09:45:42 +02:00
Jonathan Kössler
9669152e14 Merge branch 'master' into feature/RES-856-test-proto-format 2024-09-26 09:39:28 +02:00
Jonathan Kössler
ed3f8088e1 Merge branch 'feature/RES-844-fix-tracing' into 'master'
RES-844: fix opentelemetry tracing

See merge request knecon/research/pyinfra!98
2024-09-26 09:13:52 +02:00
Jonathan Kössler
66eaa9a748 feat: set range for protobuf version 2024-09-25 14:16:40 +02:00
Jonathan Kössler
3a04359320 chore: bump pyinfra version 2024-09-25 11:59:52 +02:00
Jonathan Kössler
b46fcbd977 feat: add AioPikaInstrumentor 2024-09-25 11:58:51 +02:00
Jonathan Kössler
e75df42bec feat: skip keys in int conversion 2024-09-25 11:07:20 +02:00
Jonathan Kössler
3bab86fe83 chore: update test files 2024-09-24 11:59:08 +02:00
Jonathan Kössler
c5d53b8665 feat: add file comparison 2024-09-24 11:57:33 +02:00
Jonathan Kössler
09d39930e7 chore: cleanup test 2024-09-23 16:43:59 +02:00
Jonathan Kössler
a81f1bf31a chore: update protobuf to 25.5 2024-09-23 16:41:57 +02:00
Francisco Schulz
0783e95d22 Merge branch 'RED-10017-investigate-crashing-py-services-when-upload-large-number-of-files' into 'master'
fix: add semaphore to AsyncQueueManager to limit concurrent tasks

See merge request knecon/research/pyinfra!97
2024-09-23 15:19:40 +02:00
Francisco Schulz
8ec13502a9 fix: add semaphore to AsyncQueueManager to limit concurrent tasks 2024-09-23 15:19:40 +02:00
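
A minimal sketch of how a semaphore can cap concurrent message processing in an async queue manager, as the fix above describes; the class and attribute names (BoundedTaskRunner, max_concurrent_tasks) are illustrative, not the actual AsyncQueueManager API.

```python
import asyncio


class BoundedTaskRunner:
    """Illustrative: only max_concurrent_tasks messages are processed at once."""

    def __init__(self, max_concurrent_tasks: int = 10):
        self._semaphore = asyncio.Semaphore(max_concurrent_tasks)

    async def handle_message(self, message: bytes) -> None:
        async with self._semaphore:  # excess handlers wait here instead of piling up
            await self._process(message)

    async def _process(self, message: bytes) -> None:
        await asyncio.sleep(0.1)  # placeholder for the real processing callback


async def main() -> None:
    runner = BoundedTaskRunner(max_concurrent_tasks=5)
    await asyncio.gather(*(runner.handle_message(b"msg") for _ in range(100)))


if __name__ == "__main__":
    asyncio.run(main())
```
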
Jonathan Kössler
43881de526 feat: add tests for types of documentreader 2024-09-20 16:42:55 +02:00
Julius Unverfehrt
67c30a5620 fix: recompile proto schemas with experimental schema update 2024-09-20 15:23:13 +02:00
Francisco Schulz
8e21b2144c Merge branch 'fix-poetry-version' into 'master'
chore: update package version

See merge request knecon/research/pyinfra!96
2024-09-02 16:56:58 +02:00
francisco.schulz
5b45cae9a0 chore: update package version 2024-09-02 10:53:09 -04:00
Francisco Schulz
f2a5a2ea0e Merge branch 'custom-build-image-classification-service-protobuf' into 'master'
fix(temp): set protobuf version range to >=v3,<v4 so image-classification model keeps working

See merge request knecon/research/pyinfra!95
2024-09-02 16:48:56 +02:00
francisco.schulz
2133933d25 chore: update dependencies 2024-08-30 08:42:19 -04:00
francisco.schulz
4c8dc6ccc0 fix(temp): set protobuf version range to >=v3,<v4 so image-classification model keeps working 2024-08-30 08:37:31 -04:00
Julius Unverfehrt
5f31e2b15f Merge branch 'RES-842-pyinfra-fix-rabbit-mq-handler-shuts-down-when-queues-not-available-yet' into 'master'
fix(queuemanager): add retries to prevent container from shutting down when queues are not available yet

See merge request knecon/research/pyinfra!94
2024-08-30 13:59:02 +02:00
francisco.schulz
88aef57c5f chore: version increase 2024-08-29 11:18:36 -04:00
francisco.schulz
2b129b35f4 fix(queuemanager): add retries to prevent container from shutting down when queues are not available yet 2024-08-29 11:17:11 -04:00
Jonathan Kössler
facb9726f9 Merge branch 'feature/RES-840-add-client-connector-error' into 'master'
feat: add ClientConnectorError

See merge request knecon/research/pyinfra!93
2024-08-28 14:39:40 +02:00
Jonathan Kössler
b6a2069a6a feat: add ClientConnectorError 2024-08-28 10:28:12 +02:00
Jonathan Kössler
f626ef2e6f Merge branch 'bugfix/RES-834-service-disconnects' into 'master'
fix: pod restarts due to health check

See merge request knecon/research/pyinfra!92
2024-08-26 15:10:51 +02:00
Jonathan Kössler
318779413a fix: add signal to webserver 2024-08-23 17:23:53 +02:00
Jonathan Kössler
f27b1fbba1 chore: bump version 2024-08-23 16:56:54 +02:00
Jonathan Kössler
f2018f9c86 fix: process message in thread in event loop 2024-08-23 16:56:24 +02:00
Julius Unverfehrt
a5167d1230 Merge branch 'bugfix/RES-826-fix-initial-startup' into 'master'
fix: add async webserver for probes

See merge request knecon/research/pyinfra!91
2024-08-21 17:25:35 +02:00
Jonathan Kössler
1e939febc2 refactor: function naming 2024-08-21 17:02:04 +02:00
Jonathan Kössler
564f2cbb43 chore: bump version 2024-08-21 16:25:17 +02:00
Jonathan Kössler
fa44f36088 feat: add async webserver for probes 2024-08-21 16:24:20 +02:00
Jonathan Kössler
2970823cc1 Merge branch 'refactor/tenant_queue_settings' into 'master'
refactor: tenant queues settings

See merge request knecon/research/pyinfra!90
2024-08-19 14:43:24 +02:00
Jonathan Kössler
dba348a621 refactor: tenant queues settings 2024-08-19 14:37:48 +02:00
Jonathan Kössler
5020e54dcc Merge branch 'fix/RES-820-channel-opening' into 'master'
fix: use is_initialized instead of is_open

See merge request knecon/research/pyinfra!89
2024-08-16 14:23:46 +02:00
Jonathan Kössler
2bc332831e fix: use is_initialized instead of is_open 2024-08-16 12:37:28 +02:00
Jonathan Kössler
b3f1529be2 chore: bump version 2024-08-06 09:48:09 +02:00
Jonathan Kössler
789f6a7f7c Merge branch 'feat/RES-757-protobuffer' into 'master'
feat: add protobuffer

See merge request knecon/research/pyinfra!87
2024-08-06 09:44:01 +02:00
Jonathan Kössler
06ce8bbb22 Merge branch 'master' into feat/RES-757-protobuffer 2024-08-05 11:01:40 +02:00
Jonathan Kössler
fdde56991b Merge branch 'refactor/RES-780-graceful-shutdown' into 'master'
refactor: graceful shutdown

See merge request knecon/research/pyinfra!88
2024-08-02 13:57:04 +02:00
Jonathan Kössler
cb8509b120 refactor: message counter 2024-08-01 17:42:59 +02:00
Jonathan Kössler
47b42e95e2 refactor: graceful shutdown 2024-08-01 15:31:58 +02:00
Jonathan Kössler
536284ed84 chore: update readme 2024-08-01 09:56:13 +02:00
Jonathan Kössler
aeac1c58f9 chore: bump pyinfra version 2024-07-31 16:05:42 +02:00
Jonathan Kössler
b12b1ce42b refactor: use protoc 4.25.x as compiler to avoid dependency issues 2024-07-31 16:04:43 +02:00
Jonathan Kössler
50b7a877e9 fix: poetry lock 2024-07-30 10:45:37 +02:00
Jonathan Kössler
f3d0f24ea6 Merge branch 'master' into feat/RES-757-protobuffer 2024-07-30 10:40:56 +02:00
Jonathan Kössler
8f1ad1a4bd Merge branch 'feature/RES-731-add-queues-per-tenant' into 'master'
feat: refactor to work asynchronously

See merge request knecon/research/pyinfra!86
2024-07-29 15:06:05 +02:00
Jonathan Kössler
2a2028085e feat: add async retry for tenant server calls 2024-07-25 14:45:19 +02:00
Jonathan Kössler
66aaeca928 fix: async queue test 2024-07-24 17:28:13 +02:00
Jonathan Kössler
23aaaf68b1 refactor: simplify rabbitmq config 2024-07-23 18:34:50 +02:00
Jonathan Kössler
c7e0df758e feat: add async health endpoint 2024-07-23 15:42:48 +02:00
Jonathan Kössler
13d670091c chore: update readme 2024-07-22 17:31:32 +02:00
Jonathan Kössler
1520e96287 refactor: cleanup codebase 2024-07-22 16:57:02 +02:00
Jonathan Kössler
28451e8f8f chore: bump pyinfra version 2024-07-22 16:54:28 +02:00
Jonathan Kössler
596d4a9bd0 feat: add expiration for tenant event queue and retry to tenant api call 2024-07-22 16:48:31 +02:00
Julius Unverfehrt
70d3a210a1 feat: update data loader tests
We now compare the output of the proto-to-JSON conversion to expected JSON files.
This revealed multiple differences between the files.

FIXED: the int64 type was cast into a string in Python. We now get proper
integers.

TODO: Empty fields are omitted by proto, but the JSONs have them and the
services implementing pyinfra might expect them. We have to test this
behaviour and adjust the tests accordingly.
2024-07-18 12:36:29 +02:00
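
A minimal sketch of the kind of post-processing the FIXED note above implies: proto3's JSON mapping renders int64 fields as strings, so a converted dict can be walked and digit-only strings turned back into integers. The helper name and the blanket string-to-int rule are illustrative assumptions, not the actual data loader code.

```python
from typing import Any


def restore_int64(value: Any) -> Any:
    """Recursively convert digit-only strings (proto3 JSON int64) back to ints."""
    if isinstance(value, dict):
        return {key: restore_int64(item) for key, item in value.items()}
    if isinstance(value, list):
        return [restore_int64(item) for item in value]
    if isinstance(value, str) and value.lstrip("-").isdigit():
        return int(value)
    return value


assert restore_int64({"pageCount": "42", "items": ["7", "x"]}) == {"pageCount": 42, "items": [7, "x"]}
```
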
Jonathan Kössler
f935056fa9 refactor: dataloader to not crash on unknown file formats 2024-07-17 13:54:50 +02:00
Jonathan Kössler
eeb4c3ce29 fix: add await to is_ready 2024-07-17 11:41:31 +02:00
Jonathan Kössler
b8833c7560 fix: settings mapping 2024-07-17 10:51:14 +02:00
Julius Unverfehrt
f175633f30 chore: track proto buf test data with dvc 2024-07-16 17:36:50 +02:00
Julius Unverfehrt
ceac21c1ef deps: add dvc 2024-07-16 17:35:03 +02:00
Julius Unverfehrt
0d232226fd feat: integrate proto data loader in pipeline 2024-07-16 17:34:39 +02:00
Julius Unverfehrt
9d55b3be89 feat: implement proto data loader 2024-07-16 16:32:58 +02:00
Julius Unverfehrt
edba6fc4da feat: track proto schemata & add compilations to package 2024-07-16 16:31:48 +02:00
Julius Unverfehrt
c5d8a6ed84 feat: add proto requirements and instructions to readme for compiling the schemata 2024-07-16 16:30:32 +02:00
Julius Unverfehrt
c16000c774 fix(tracing test): make test work in case azure connection string is missing 2024-07-15 16:13:41 +02:00
Jonathan Kössler
02665a5ef8 feat: align async queue manager 2024-07-12 15:14:13 +02:00
Jonathan Kössler
9c28498d8a feat: rollback testing logic for send_request 2024-07-12 15:12:46 +02:00
Jonathan Kössler
3c3580d3bc feat: add backwards compatibility 2024-07-12 12:26:56 +02:00
Jonathan Kössler
8ac16de0fa feat: add backwards compatibility 2024-07-12 12:23:45 +02:00
Jonathan Kössler
8844df44ce feat: add async_v2 2024-07-12 12:12:55 +02:00
Jonathan Kössler
a5162d5bf0 chore: update poetry deps 2024-07-12 12:10:31 +02:00
francisco.schulz
f9aec74d55 chore: clean up + improve robustness 2024-07-11 15:54:21 -04:00
francisco.schulz
7559118822 fix: remove sleep commands 2024-07-11 14:50:11 -04:00
francisco.schulz
5ff65f2cf4 feat(tests): add RabbitMQHandler class tests 2024-07-11 14:46:41 -04:00
francisco.schulz
cc25a20c24 feat(process_input_message): add message processing logic with support to pass in external message processor 2024-07-11 12:21:48 -04:00
francisco.schulz
f723bcb9b1 fix(fetch_active_tenants): proper async API call 2024-07-11 12:06:59 -04:00
francisco.schulz
abde776cd1 feat(RabbitMQHandler): add async test class 2024-07-11 11:55:52 -04:00
francisco.schulz
aa23894858 chore(dependencies): update 2024-07-11 11:55:17 -04:00
Jonathan Kössler
2da4f37620 feat: wip for multiple tenants - for pkg build 2024-07-11 12:49:07 +02:00
Jonathan Kössler
9b20a67ace feat: wip for multiple tenants - for pkg build 2024-07-11 11:41:09 +02:00
Jonathan Kössler
7b6408e0de feat: wip for multiple tenants - for pkg build 2024-07-11 11:04:02 +02:00
Jonathan Kössler
6e7c4ccb7b feat: wip for multiple tenants - for pkg build 2024-07-10 11:45:47 +02:00
Jonathan Kössler
b2e3ae092f feat: wip for multiple tenants 2024-07-09 18:20:55 +02:00
Jonathan Kössler
de41030e69 feat: wip for multiple tenants 2024-07-05 13:27:16 +02:00
Jonathan Kössler
c81d967aee feat: wip for multiple tenants 2024-07-03 17:51:47 +02:00
Jonathan Kössler
30330937ce feat: wip for multiple tenants 2024-07-02 18:07:23 +02:00
Jonathan Kössler
7624208188 feat: wip for multiple tenants 2024-07-01 18:15:04 +02:00
Jonathan Kössler
6fabe1ae8c feat: wip for multiple tenants 2024-06-28 15:41:53 +02:00
Jonathan Kössler
3532f949a9 refactor: remove second trace setup 2024-06-26 18:15:51 +02:00
Jonathan Kössler
65cc1c9aad fix: improve error handling for tracing settings 2024-06-26 18:02:52 +02:00
Jonathan Kössler
2484a5e9f7 chore: bump pyinfra version 2024-06-17 13:53:42 +02:00
Julius Unverfehrt
88fe7383f3 Merge branch 'feature/RES-718-add-azure-monitoring' into 'master'
RES-718: add azure tracing

See merge request knecon/research/pyinfra!85
2024-06-17 12:25:09 +02:00
Jonathan Kössler
18a0ddc2d3 feat: add tracing settings to validator 2024-06-13 08:47:50 +02:00
Jonathan Kössler
5328e8de03 refactor: streamline tracing types 2024-06-12 10:41:52 +02:00
Jonathan Kössler
9661d75d8a refactor: update tracing info for Azure Monitor 2024-06-11 14:31:06 +02:00
Jonathan Kössler
7dbcdf1650 feat: add azure opentelemetry monitoring 2024-06-11 12:00:18 +02:00
Julius Unverfehrt
4536f9d35b Merge branch 'RES-671-multi-file-dl' into 'master'
feat: add multiple file download

See merge request knecon/research/pyinfra!84
2024-04-18 16:47:00 +02:00
Julius Unverfehrt
a1e7b3b565 build: add SBOM and increment package version 2024-04-18 16:39:46 +02:00
Julius Unverfehrt
b810449bba feat: add multiple file download
The download function is now overloaded and additionally supports a dict
with file paths as values, in addition to the existing single string file
path. In the dict case, the downloaded data is forwarded as a dict of the
same structure.
2024-04-18 16:35:55 +02:00
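
A minimal sketch of the overloading described above: the same download entry point accepts either a single path string or a dict of file paths, and in the dict case returns the bytes in a dict of the same shape. The function signature and the injected fetch callable are illustrative assumptions, not the actual pyinfra API.

```python
from typing import Callable, Dict, Union


def download(
    path_or_paths: Union[str, Dict[str, str]],
    fetch: Callable[[str], bytes],
) -> Union[bytes, Dict[str, bytes]]:
    """Download one file (str path) or several (dict of name -> path)."""
    if isinstance(path_or_paths, dict):
        # Dict case: forward the data in a dict of the same structure.
        return {name: fetch(path) for name, path in path_or_paths.items()}
    return fetch(path_or_paths)


def fake_fetch(path: str) -> bytes:
    return f"contents of {path}".encode()


single = download("a/report.pdf", fake_fetch)
multiple = download({"report": "a/report.pdf", "image": "a/scan.png"}, fake_fetch)
```
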
Julius Unverfehrt
f67813702a Merge branch 'RED-8978-no-crash-on-non-existing-files' into 'master'
fix: add error handling for file not found error

See merge request knecon/research/pyinfra!83
2024-04-16 16:28:25 +02:00
Julius Unverfehrt
ed4f912acf build: increment service version 2024-04-16 16:21:57 +02:00
Julius Unverfehrt
021222475b fix: add error handling for file not found error
When a file couldn't be downloaded from storage, the queue consumer now
informs the operator with a log and rejects the message instead of crashing,
then continues its work.
2024-04-16 16:20:08 +02:00
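
A minimal sketch of the behaviour described above: when the referenced file cannot be fetched from storage, the consumer logs the problem and rejects the message rather than crashing. The message key and the reject(requeue=False) callable are illustrative assumptions, not the actual pyinfra consumer code.

```python
import logging

logger = logging.getLogger("consumer")


def handle(message: dict, download, process, reject) -> None:
    """Reject instead of crashing when the file is missing, then keep consuming."""
    try:
        data = download(message["targetFilePath"])
    except FileNotFoundError:
        logger.error("File %s not found in storage, rejecting message", message["targetFilePath"])
        reject(requeue=False)  # drop or dead-letter the message
        return
    process(data)
```
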
Julius Unverfehrt
876253b3fb tests: add test for file not found error 2024-04-16 16:19:45 +02:00
Julius Unverfehrt
1689cd762b fix(CI): fix CI 2024-01-31 12:03:07 +01:00
Julius Unverfehrt
dc413cea82 Merge branch 'opentel' into 'master'
RES-506, RES-507, RES-499, RES-434, RES-398

See merge request knecon/research/pyinfra!82
2024-01-31 11:21:17 +01:00
Julius Unverfehrt
bfb27383e4 fix(settings): change precedence to ENV ROOT_PATH > root_path arg 2024-01-31 10:24:29 +01:00
Julius Unverfehrt
af914ab3ae fix(argparse): automatically output settings path 2024-01-31 10:12:32 +01:00
Julius Unverfehrt
7093e01925 feat(opentelemetry): add webserver tracing to default pipeline 2024-01-31 09:09:13 +01:00
Julius Unverfehrt
88cfb2b1c1 fix(settings): add debug log 2024-01-30 14:52:35 +01:00
Julius Unverfehrt
c1301d287f fix(dependencies): move opentel deps to main since groups are not packaged with CI script 2024-01-30 14:31:08 +01:00
Julius Unverfehrt
f1b8e5a25f refac(arg parse): rename settings parsing fn for clarity 2024-01-30 13:27:19 +01:00
Julius Unverfehrt
fff5be2e50 feat(settings): improve config loading logic
Load settings from .toml files, .env and environment variables. Also ensures a ROOT_PATH environment variable is
set. If ROOT_PATH is not set and no root_path argument is passed, the current working directory is used as root.
Settings paths can be a single .toml file, a folder containing .toml files or a list of .toml files and folders.
If a folder is passed, all .toml files in the folder are loaded. If settings path is None, only .env and
environment variables are loaded. If settings_path are relative paths, they are joined with the root_path argument.
2024-01-30 12:56:58 +01:00
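
A minimal sketch of the path-resolution rules described above (ENV ROOT_PATH over the root_path argument over the working directory; a file, folder, or list of both expanded to concrete .toml files; relative paths joined with the root). It only illustrates the resolution step, not the actual loading, which other commits in this history suggest is dynaconf-based.

```python
import os
from pathlib import Path
from typing import Iterable, List, Optional, Union


def resolve_settings_files(
    settings_path: Optional[Union[str, Path, Iterable[Union[str, Path]]]],
    root_path: Optional[str] = None,
) -> List[Path]:
    """Expand a file, folder, or list of both into concrete .toml files."""
    # ENV ROOT_PATH takes precedence over the root_path argument; fall back to cwd.
    root = Path(os.environ.get("ROOT_PATH") or root_path or os.getcwd())
    os.environ.setdefault("ROOT_PATH", str(root))  # ensure ROOT_PATH is set

    if settings_path is None:
        return []  # only .env and environment variables will be loaded

    entries = [settings_path] if isinstance(settings_path, (str, Path)) else list(settings_path)
    files: List[Path] = []
    for entry in entries:
        candidate = Path(entry)
        if not candidate.is_absolute():
            candidate = root / candidate  # relative paths are joined with the root path
        if candidate.is_dir():
            files.extend(sorted(candidate.glob("*.toml")))  # a folder means all its .toml files
        else:
            files.append(candidate)
    return files
```
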
Julius Unverfehrt
ec9ab21198 package: increment major version and update kn-utils 2024-01-25 11:08:50 +01:00
Julius Unverfehrt
b2f073e0c5 refactor: IoC for callback, update readme 2024-01-25 10:41:48 +01:00
Julius Unverfehrt
f6f56b8d8c refactor: simplify storage connection logic 2024-01-25 09:08:51 +01:00
Isaac Riley
8ff637d6ba chore: add opentelemetry subsection to README.md; formatting 2024-01-25 08:25:19 +01:00
Julius Unverfehrt
c18475a77d feat(opentelemetry): improve readability 2024-01-24 17:46:54 +01:00
Julius Unverfehrt
e0b32fa448 feat(opentelemetry): fastAPI tracing
The tests don't work yet, since the webserver has to run in a thread and
the traces don't get exported from the thread with local JSON exporting.
However, with export to an external server this should still work.
WIP
2024-01-24 15:52:42 +01:00
Julius Unverfehrt
da163897c4 feat(opentelemetry): add fastapi instrumentation 2024-01-24 14:26:10 +01:00
Julius Unverfehrt
a415666830 feat(opentelemetry): put logic in own module 2024-01-24 14:00:11 +01:00
Julius Unverfehrt
739a7c0731 feat(opentelemetry): add queue instrumenting test 2024-01-24 13:26:01 +01:00
Isaac Riley
936bb4fe80 feat: add opentelemetry on top of newly refactored pyinfra 2024-01-24 08:09:42 +01:00
Julius Unverfehrt
725d6dce45 Update readme 2024-01-23 18:08:57 +01:00
Julius Unverfehrt
be602d8411 Adjust logs 2024-01-23 14:10:56 +01:00
Julius Unverfehrt
429a85b609 Disable automated tests until we find a way to run docker compose beforehand 2024-01-23 10:26:44 +01:00
Julius Unverfehrt
d6eeb65ccc Update scripts 2024-01-23 10:25:56 +01:00
Julius Unverfehrt
adfbd650e6 Add config tests, add type validation to config loading 2024-01-23 08:51:44 +01:00
Julius Unverfehrt
73eba97ede Add serving example
TODO: - update readme
      - check if logs are adequate
2024-01-19 14:53:06 +01:00
Julius Unverfehrt
8cd1d6b283 add retries to queue consuming, so we retry at least a bit if something happens. Eventually the container should still crash, though, since some problems are sadly unfixable. 2024-01-19 14:15:00 +01:00
Julius Unverfehrt
87cbf89672 finish config loading logic 2024-01-19 14:05:05 +01:00
Julius Unverfehrt
9c2f34e694 Put add health check in own function 2024-01-19 13:13:12 +01:00
Julius Unverfehrt
fbbfc553ae fix message encoding for response, rename some functions 2024-01-19 12:46:02 +01:00
Julius Unverfehrt
b7f860f36b WIP: add callback factory and update example scripts 2024-01-18 17:10:04 +01:00
Julius Unverfehrt
6802bf5960 refactor: download and upload file logic, module structure, remove redundant files so far 2024-01-18 15:54:38 +01:00
Julius Unverfehrt
ec5ad09fa8 refactor: multi tenant storage connection 2024-01-18 11:34:21 +01:00
Julius Unverfehrt
17c5eebdf6 finish prometheus 2024-01-18 08:19:46 +01:00
Julius Unverfehrt
358e227251 fix prometheus tests WIP 2024-01-17 17:39:53 +01:00
Julius Unverfehrt
f31693d36a refactor: adapt prometheus monitoring logic to work with other webservers WIP 2024-01-16 17:24:53 +01:00
Julius Unverfehrt
e5c8a6e9f1 refactor: update storages with dynaconf logic, add validators, repair test 2024-01-16 15:34:56 +01:00
Julius Unverfehrt
27917863c9 refactor: finish queue manager, queue manager tests, also add validation logic, integrate new settings 2024-01-16 14:35:23 +01:00
Julius Unverfehrt
ebc519ee0d refactor: finish queue manager, queue manager tests, also add validation logic, integrate new settings 2024-01-16 14:16:27 +01:00
Julius Unverfehrt
b49645cce4 refactor: queue manager and config logic WIP 2024-01-15 16:46:33 +01:00
Julius Unverfehrt
64871bbb62 refactor: add basic queue manager test 2024-01-15 10:30:07 +01:00
Julius Unverfehrt
1f482f2476 fix: storage test 2024-01-09 16:07:48 +01:00
Francisco Schulz
8dfba74682 Merge branch 'RED-7958-logging-issues-of-python-services' into 'master'
RED-7958 logging issues of python services

See merge request knecon/research/pyinfra!81
2023-11-28 10:21:37 +01:00
francisco.schulz
570689ed9b increment version 2023-11-28 09:35:46 +01:00
francisco.schulz
5db56d8449 update CI template 2023-11-28 09:20:22 +01:00
francisco.schulz
3a9d34f9c0 add loglevel tests & fix broken exception and error log tests 2023-11-28 09:16:42 +01:00
francisco.schulz
3084d6338c update dependencies for kn-utils@0.2.4.dev112 2023-11-28 09:16:05 +01:00
Julius Unverfehrt
3a3a8e4ce1 Merge branch 'feature/version-upgrade-knutils-logging' into 'master'
Upgrade python version & change logger

See merge request knecon/research/pyinfra!80
2023-11-13 15:48:22 +01:00
Julius Unverfehrt
bb00c83a80 Upgrade python version & change logger
- Upgrades the python version to 3.10 and syncs packages with Isaac's list.
- Changes the loguru logger to the kn_utils logger.
- Overrides python version in CI script (temporarily until all services
  are updated and CI template can be adjusted).
2023-11-13 15:28:49 +01:00
Julius Unverfehrt
b297894505 Merge branch 'feature/stack-trace-for-exeptions' into 'master'
Add stacktrace to processing failures

See merge request knecon/research/pyinfra!79
2023-09-05 13:04:00 +02:00
Julius Unverfehrt
261b991049 Add stacktrace to processing failures
If a processing failure occurs in the processing callback, pyinfra now
prints the stack trace in addition to the exception.

Also removes knutils logging for now, since it still contains bugs and
should first be tested in a non-production environment to confirm
production readiness.
2023-09-05 12:59:45 +02:00
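
A minimal sketch of the change described above: the processing callback is wrapped so that a failure logs the full stack trace, not only the exception message. The wrapper name and logger are illustrative assumptions, not the actual pyinfra code.

```python
import logging
import traceback

logger = logging.getLogger("pyinfra")


def run_callback(callback, payload):
    """Log the full stack trace on processing failures, then re-raise."""
    try:
        return callback(payload)
    except Exception as error:
        logger.error("Processing failed: %s\n%s", error, traceback.format_exc())
        raise
```
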
Julius Unverfehrt
84c4e7601f Update kn-utils package
Update kn-utils for missing loglevels fix, which is needed for queue
manager error logging.
2023-08-30 15:58:29 +02:00
Julius Unverfehrt
201ed5b9a8 Merge branch 'feature/RED-6685-support-absolute-paths' into 'master'
Add support for absolute file paths

See merge request knecon/research/pyinfra!77
2023-08-23 14:11:46 +02:00
Julius Unverfehrt
72547201f3 Adjust log levels to reduce log clutter
Also updates readme and adds pytest execution to CI script.
2023-08-23 12:38:34 +02:00
Julius Unverfehrt
c09476cfae Update tests
All components from payload processing downwards are tested.

Tests that depend on docker compose have been disabled by default
because they take too long to use during development. Furthermore, the
queue manager tests are not stable; a refactoring with inversion of
control is urgently needed to make the components properly testable. The
storage tests are stable and should be run once before releasing; this
should be implemented via the CI script.

Also adds, if present, tenant Id and operation kwargs to storage and
queue response.
2023-08-22 17:33:22 +02:00
Julius Unverfehrt
e580a66347 Refactor storage provider & payload parser
Applies strategy pattern to payload parsing logic to improve
maintainability and testability.
Renames storage manager to storage provider.
2023-08-22 10:46:27 +02:00
Julius Unverfehrt
294688ea66 RED-7002 Forward exceptions from thread context
PyInfra now reports exceptions that happen inside the processing
callback.
Also refactors queue manager logging to fit the new logger by
changing the "%s", var style to f-strings, since that syntax is not supported
by knutils logging.
2023-08-22 10:46:27 +02:00
Julius Unverfehrt
7187f0ec0c RES-343 Update logging to knutils logger 2023-08-22 10:46:14 +02:00
Julius Unverfehrt
ef916ee790 Refactor payload processing logic
Streamlines payload processor class by encapsulating closely dependent
logic, to improve readability and maintainability.
2023-08-18 12:49:21 +02:00
Julius Unverfehrt
48d74b4307 Add support for absolute file paths
Introduces new payload parsing logic to be able to process absolute file
paths. The queue message is expected to contain the keys
"targetFilePath" and "responseFilePath".

To ensure backward-compatibility, the legacy "dossierId", "fileId"
messages are still supported.
2023-08-18 12:45:53 +02:00
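
A minimal sketch of the parsing rule described above: prefer the new absolute-path keys and fall back to the legacy dossierId/fileId scheme. The function name and especially the legacy path template are illustrative guesses, not the actual payload parser.

```python
from typing import Tuple


def parse_file_paths(message: dict) -> Tuple[str, str]:
    """Return (target, response) file paths from a queue message."""
    if "targetFilePath" in message and "responseFilePath" in message:
        return message["targetFilePath"], message["responseFilePath"]
    # Legacy messages: derive paths from dossierId and fileId (hypothetical layout).
    dossier, file_id = message["dossierId"], message["fileId"]
    return f"{dossier}/{file_id}", f"{dossier}/{file_id}.response"
```
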
Francisco Schulz
692ff204c3 Merge branch 'bugfix/RES-269' into 'master'
Bugfix/res 269

See merge request knecon/research/pyinfra!75
2023-08-17 09:55:27 +02:00
Francisco Schulz
03eddadcb9 update template 2023-08-17 09:48:35 +02:00
francisco.schulz
daddec7dc3 increment version 2023-07-18 16:59:50 +02:00
francisco.schulz
370e978fa7 upgrade dependencies, allow python>=3.8 2023-07-18 16:54:29 +02:00
Julius Unverfehrt
366d040ceb Merge branch 'RES-201-red-research-services-investigate-why-k-8-s-startup-probes-are-not-starting' into 'master'
RES-201 red research services investigate why k8s startup probes are not starting

See merge request knecon/research/pyinfra!74
2023-06-26 13:57:25 +02:00
francisco.schulz
9598b963ee remove dist/* files 2023-06-21 15:28:12 +02:00
francisco.schulz
2bacc4d971 update dependencies 2023-06-21 14:13:48 +02:00
francisco.schulz
d228c0a891 temporarily disable tests 2023-06-21 08:12:20 +02:00
francisco.schulz
4e6b4e2969 update dependencies 2023-06-20 17:13:26 +02:00
francisco.schulz
892b6e8236 use template CI 2023-06-20 17:13:08 +02:00
francisco.schulz
d63435e092 change k8s startup probe script to function call 2023-06-20 17:04:03 +02:00
Julius Unverfehrt
7e995bd78b Merge branch 'RES-196-red-hotfix-persistent-service-address' into 'master'
Fix: New tenant storage information endpoint

See merge request knecon/research/pyinfra!73
2023-06-15 16:29:38 +02:00
Julius Unverfehrt
c4e03d4641 Fix: New tenant storage information endpoint
Parametrize the tenant endpoint and public decryption key as environment
variables and set the default value to the new endpoint.
2023-06-15 16:22:30 +02:00
Francisco Schulz
233b546f6f Merge branch 'update-azure-dependencies' into 'master'
update azure dependencies

See merge request knecon/research/pyinfra!72
2023-05-16 14:47:09 +02:00
francisco.schulz
5ed41a392a update version number 2023-05-16 14:18:46 +02:00
francisco.schulz
4a0c59b070 update deps 2023-05-16 13:42:53 +02:00
Christoph Schabert
e67ebc27b1 Merge branch 'RES-109-add-gitlab-ci' into 'master'
RES-109: add gitlab ci

See merge request knecon/research/pyinfra!71
2023-04-20 09:43:36 +02:00
francisco.schulz
309119cb62 update version 2023-04-18 15:48:50 +02:00
francisco.schulz
a381ac6b87 temp disable tests 2023-04-18 15:38:32 +02:00
francisco.schulz
6d49f0ccb9 add CI 2023-04-18 15:37:19 +02:00
Francisco Schulz
873abdca0c remove redundant files 2023-04-18 10:28:08 +02:00
Francisco Schulz
decd3710ab remove bamboo-spec 2023-04-18 10:18:35 +02:00
Julius Unverfehrt
d838413500 Pull request #70: Bugfix/RED-6273 forward processing kwargs
Merge in RR/pyinfra from bugfix/RED-6273-forward-processing-kwargs to master

Squashed commit of the following:

commit 2f45f7329dc6fd6166e08bad720e022e722737ad
Merge: 0a6d5df 0f4646e
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 28 17:55:24 2023 +0200

    Merge branch 'master' of ssh://git.iqser.com:2222/rr/pyinfra into bugfix/RED-6273-forward-processing-kwargs

commit 0a6d5dfc1a6edd8e6d171b50334b812a79f9288d
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 28 17:51:05 2023 +0200

    update pyinfra version

commit cd417c4b515d2a5d190af883af770bc660e15bb8
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 28 17:48:12 2023 +0200

    Revert poetry update

    - adds strange rust dependency for some reason
2023-03-28 17:57:20 +02:00
Julius Unverfehrt
0f4646e390 Pull request #69: fix monitoring preventing operation kwargs for processing fn getting forwarded
Merge in RR/pyinfra from bugfix/RED-6273-forward-operation-kwargs to master

Squashed commit of the following:

commit 347add07f8ea6e085064660ae79f0df9013dd9d6
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 28 17:16:41 2023 +0200

    update pyinfra version

commit 3c17047377aca666a015eaf0f06190d3dfa28c1c
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 28 17:13:59 2023 +0200

    fix monitoring preventing operation kwargs for processing fn getting forwarded
2023-03-28 17:17:09 +02:00
Julius Unverfehrt
793a427c50 Pull request #68: RED-6273 multi tenant storage
Merge in RR/pyinfra from RED-6273-multi-tenant-storage to master

Squashed commit of the following:

commit 0fead1f8b59c9187330879b4e48d48355885c27c
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 28 15:02:22 2023 +0200

    fix typos

commit 892a803726946876f8b8cd7905a0e73c419b2fb1
Author: Matthias Bisping <matthias.bisping@axbit.com>
Date:   Tue Mar 28 14:41:49 2023 +0200

    Refactoring

    Replace custom storage caching logic with LRU decorator

commit eafcd90260731e3360ce960571f07dee8f521327
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 24 12:50:13 2023 +0100

    fix bug in storage connection from endpoint

commit d0c9fb5b7d1c55ae2f90e8faa1efec9f7587c26a
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 24 11:49:34 2023 +0100

    add logs to PayloadProcessor

    - set log messages to determine if x-tenant
    storage connection is working

commit 97309fe58037b90469cf7a3de342d4749a0edfde
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 24 10:41:59 2023 +0100

    update PayloadProcessor

    - introduce storage cache to make every unique
    storage connection only once
    - add functionality to pass optional processing
    kwargs in queue message like the operation key to
    the processing function

commit d48e8108fdc0d463c89aaa0d672061ab7dca83a0
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Wed Mar 22 13:34:43 2023 +0100

    add multi-tenant storage connection 1st iteration

    - forward x-tenant-id from queue message header to
    payload processor
    - add functions to receive storage infos from an
    endpoint or the config. This enables hashing and
    caching of connections created from these infos
    - add function to initialize storage connections
    from storage infos
    - streamline and refactor tests to make them more
    readable and robust and to make it easier to add
     new tests
    - update payload processor with first iteration
    of multi tenancy storage connection support
    with connection caching and backwards compatibility

commit 52c047c47b98e62d0b834a9b9b6c0e2bb0db41e5
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 21 15:35:57 2023 +0100

    add AES/GCM cipher functions

    - decrypt x-tenant storage connection strings
2023-03-28 15:04:14 +02:00
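
A minimal sketch of the caching idea in the squashed commits above: storage infos are made hashable so that functools.lru_cache creates each unique storage connection only once. The StorageInfo fields and the placeholder client are illustrative assumptions, not the actual pyinfra classes.

```python
from dataclasses import dataclass
from functools import lru_cache


@dataclass(frozen=True)  # frozen -> hashable, so instances can be lru_cache keys
class StorageInfo:
    endpoint: str
    bucket: str
    access_key: str
    secret_key: str


@lru_cache(maxsize=32)
def get_storage_connection(info: StorageInfo):
    """Create each unique storage connection only once; repeat calls hit the cache."""
    print(f"connecting to {info.endpoint}/{info.bucket}")  # stand-in for real client setup
    return object()  # placeholder for an S3/Azure client


info = StorageInfo("https://storage.example", "tenant-a", "key", "secret")
assert get_storage_connection(info) is get_storage_connection(info)
```
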
Julius Unverfehrt
0f24a7f26d Pull request #67: fix prometheus address
Merge in RR/pyinfra from bugfix/RED-6205-prometheus-port to master

Squashed commit of the following:

commit e97d81bebfe34c24d8da4e4392ff7dbd3638e685
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 21 15:48:04 2023 +0100

    increase package version

commit c7e181a462e275c5f2cbf1e6df4c88dfefbe36b7
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 21 15:43:46 2023 +0100

    fix prometheus address

    - change loopback address to all available network interfaces to enable
    external metric scraping
    - disable ENV input for prometheus address and port since they should
    not be set in HELM
2023-03-21 15:54:47 +01:00
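
A minimal sketch of the binding change described above, using the prometheus_client library: the metrics server listens on all interfaces instead of the loopback address so an external scraper can reach it, and the address and port are fixed in code rather than read from ENV. The port value is an illustrative assumption.

```python
from prometheus_client import start_http_server

PROMETHEUS_ADDR = "0.0.0.0"  # all interfaces, not 127.0.0.1, so external scraping works
PROMETHEUS_PORT = 8000       # illustrative port, deliberately not taken from ENV/HELM

start_http_server(PROMETHEUS_PORT, addr=PROMETHEUS_ADDR)
```
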
Julius Unverfehrt
ff6f437e84 Pull request #66: add safety measure for monitoring in case a service didn't find any results.
Merge in RR/pyinfra from add-safety-measure to master

* commit 'b985679d6b30b3a983c7b1df5fb23eef0dc95cd3':
  add safety measure for monitoring in case a service didn't find any results.
2023-03-16 17:29:12 +01:00
Julius Unverfehrt
b985679d6b add safety measure for monitoring in case a service didn't find any results. 2023-03-16 17:27:33 +01:00
Julius Unverfehrt
d6de45d783 Pull request #65: RED-6205 monitoring
Merge in RR/pyinfra from RED-6205-monitoring to master

Squashed commit of the following:

commit 529cedfd7c065a3f7364e4596b923f25f0af76b5
Author: Matthias Bisping <matthias.bisping@axbit.com>
Date:   Thu Mar 16 14:57:26 2023 +0100

    Remove unnecessary default argument to dict.get

commit b718531f568e89df77cc05039e5e7afe7111b9a4
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Mar 16 14:56:50 2023 +0100

    refactor

commit c039b0c25a6cd2ad2a72d237d0930c484c8e427c
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Mar 16 13:22:17 2023 +0100

    increase package version to reflect the recent changes

commit 0a983a4113f25cd692b68869e1f33ffbf7efc6f0
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Mar 16 13:16:39 2023 +0100

    remove processing result conversion to a list, since the ner-prediction service actually returns a dictionary. It is now expected that the result is sized (to perform the monitoring) and JSON-dumpable (to upload it).

commit 541bf321410471dc09a354669b2778402286c09f
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Mar 16 12:48:07 2023 +0100

    remove no longer needed requirements

commit cfa182985d989a5b92a9a069a603daee72f37d49
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Mar 16 11:14:58 2023 +0100

    refactor payload formatting

    - introduce PayloadFormatter class for better typehinting and bundling
    of functionality
    - parametrize payload formatting so the PayloadProcessor can adapt
    better to different services/products
    - move file extension parsing to its own module

commit f57663b86954b7164eeb6db013d862af88ec4584
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Wed Mar 15 12:22:08 2023 +0100

    refactor payload parsing

    - introduce QueueMessagePayloadParser for generality
    and typehinting
    - refactor file extension parsing algorithm

commit 713fb4a0dddecf5442ceda3988444d9887869dcf
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 14 17:07:02 2023 +0100

    fix tests

commit a22ecf7ae93bc0bec235fba3fd9cbf6c1778aa13
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 14 16:31:26 2023 +0100

    refactor payload parsing

    - parameterize file and compression types allowed for files to download
    and upload via config
    - make a real value bag out of QueueMessagePayload and do the parsing
    beforehand
    - refactor the file extension parser to be more robust

commit 50b578d054ca47a94c907f5f8b585eca7ed626ac
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 14 13:21:32 2023 +0100

    add monitoring

    - add an optional prometheus monitor to monitor the average processing
    time of a service per relevant parameter that is at this point defined
    via the number of resulting elements.

commit de525e7fa2f846f7fde5b9a4b466039238da10cd
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 14 12:57:24 2023 +0100

    fix bug in file extension parser not working if the file endings have prefixes
2023-03-16 16:08:44 +01:00
Christoph Schabert
564c429834 Pull request #64: update java version for sonar-scan
Merge in RR/pyinfra from cschabert/PlanSpecjava-1678717832322 to master

Squashed commit of the following:

commit 3ae2b191e777739738d91d114c376ac78efa193f
Author: Christoph Schabert <christoph.schabert@iqser.com>
Date:   Tue Mar 14 08:36:54 2023 +0100

    PlanSpec.java edited online with Bitbucket

commit 2aa012242c77958701ca7b3400ed4b3272cd7d95
Author: Christoph Schabert <christoph.schabert@iqser.com>
Date:   Tue Mar 14 08:34:40 2023 +0100

    sonar-scan.sh edited online with Bitbucket

commit 2dd8c21229f40f4972b632702c4bcf4ad71bf7ae
Author: Christoph Schabert <christoph.schabert@iqser.com>
Date:   Tue Mar 14 08:33:50 2023 +0100

    sonar-scan.sh edited online with Bitbucket

commit 8837c31d664a7cb913ac538c9403871352b014a3
Author: Christoph Schabert <christoph.schabert@iqser.com>
Date:   Tue Mar 14 08:33:17 2023 +0100

    sonar-scan.sh edited online with Bitbucket

commit 0de23c519fcbb9f991a85389fe1644af4256266b
Author: Christoph Schabert <christoph.schabert@iqser.com>
Date:   Tue Mar 14 08:28:00 2023 +0100

    config-keys.sh edited online with Bitbucket

commit 4f971967e5055e368bc3c779f7f400bbf9b86a42
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 14 08:22:17 2023 +0100

    update bamboo agent username

commit 37fa1bbf9f83ec3d242a32e2051b6f1615102307
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 14 08:08:46 2023 +0100

    remove venv install

commit 44180f403ac8a5b1b33090081c45e30121dbae8d
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 14 08:07:13 2023 +0100

    add venv install

commit eac141bf8f430af3f7406a89df5147cd93231278
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Mar 14 08:05:51 2023 +0100

    add venv install

commit 24b37f9f83db20e90d3bd528f4111f524b7485c5
Author: Christoph Schabert <christoph.schabert@iqser.com>
Date:   Mon Mar 13 15:47:03 2023 +0100

    Set new image for Sonar Scan

commit b734389316f60b2fdbe4bdcdf00d1f2f14e61266
Author: Christoph Schabert <christoph.schabert@iqser.com>
Date:   Mon Mar 13 15:30:45 2023 +0100

    update java version for sonar-scan
2023-03-14 08:39:41 +01:00
Julius Unverfehrt
3c4739ad8b Pull request #63: RED-6366 refactor
Merge in RR/pyinfra from RED-6366-refactor to master

Squashed commit of the following:

commit 8807cda514b5cc24b1be208173283275d87dcb97
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 10 13:15:15 2023 +0100

    enable docker-compose autouse for automatic tests

commit c4579581d3e9a885ef387ee97f3f3a5cf4731193
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 10 12:35:49 2023 +0100

    black

commit ac2b754c5624ef37ce310fce7196c9ea11bbca03
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 10 12:30:23 2023 +0100

    refactor storage url parsing

    - move parsing and validation to config where the connection url is
    actually read in
    - improve readability of parsing fn

commit 371802cc10b6d946c4939ff6839571002a2cb9f4
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 10 10:48:00 2023 +0100

    refactor

commit e8c381c29deebf663e665920752c2965d7abce16
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 10 09:57:34 2023 +0100

    rename

commit c8628a509316a651960dfa806d5fe6aacb7a91c1
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 10 09:37:01 2023 +0100

    renaming and refactoring

commit 4974d4f56fd73bc55bd76aa7a9bbb16babee19f4
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Mar 10 08:53:09 2023 +0100

    refactor payload processor

    - limit make_uploader and make_downloader cache
    - partially apply them when the class is initialized with storage and
    bucket to make the logic and behaviour more comprehensible
    - renaming functional pipeline steps to be more expressive

commit f8d51bfcad2b815c8293ab27dd66b256255c5414
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Mar 9 15:30:32 2023 +0100

    remove monitor and rename Payload

commit 412ddaa207a08aff1229d7acd5d95402ac8cd578
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Mar 2 10:15:39 2023 +0100

    remove azure connection string and disable respective test for now for security reasons

commit 7922a2d9d325f3b9008ad4e3e56b241ba179f52c
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Wed Mar 1 13:30:58 2023 +0100

    make payload formatting function names more expressive

commit 7517e544b0f5a434579cc9bada3a37e7ac04059f
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Wed Mar 1 13:24:57 2023 +0100

    add some type hints

commit 095410d3009f2dcbd374680dd0f7b55de94c9e76
Author: Matthias Bisping <matthias.bisping@axbit.com>
Date:   Wed Mar 1 10:54:58 2023 +0100

    Refactoring

    - Renaming
    - Docstring adjustments

commit e992f0715fc2636eb13eb5ffc4de0bcc5d433fc8
Author: Matthias Bisping <matthias.bisping@axbit.com>
Date:   Wed Mar 1 09:43:26 2023 +0100

    Re-wording and typo fixes

commit 3c2d698f9bf980bc4b378a44dc20c2badc407b3e
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Feb 28 14:59:59 2023 +0100

    enable auto startup for docker compose in tests

commit 55773b4fb0b624ca4745e5b8aeafa6f6a0ae6436
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Feb 28 14:59:37 2023 +0100

    Extended tests for queue manager

commit 14f7f943f60b9bfb9fe77fa3cef99a1e7d094333
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Feb 28 13:39:00 2023 +0100

    enable auto startup for docker compose in tests

commit 7caf354491c84c6e0b0e09ad4d41cb5dfbfdb225
Merge: 49d47ba d0277b8
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Feb 28 13:32:52 2023 +0100

    Merge branch 'RED-6205-prometheus' of ssh://git.iqser.com:2222/rr/pyinfra into RED-6205-prometheus

commit 49d47baba8ccf11dee48a4c1cbddc3bbd12471e5
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Feb 28 13:32:42 2023 +0100

    adjust Payload Processor signature

commit d0277b86bc54994b6032774bf0ec2d7b19d7f517
Merge: 5184a18 f6b35d6
Author: Christoph Schabert <christoph.schabert@iqser.com>
Date:   Tue Feb 28 11:07:16 2023 +0100

    Pull request #61: Change Sec Trigger to PR

    Merge in RR/pyinfra from cschabert/PlanSpecjava-1677578703647 to RED-6205-prometheus

    * commit 'f6b35d648c88ddbce1856445c3b887bce669265c':
      Change Sec Trigger to PR

commit f6b35d648c88ddbce1856445c3b887bce669265c
Author: Christoph Schabert <christoph.schabert@iqser.com>
Date:   Tue Feb 28 11:05:13 2023 +0100

    Change Sec Trigger to PR

... and 20 more commits
2023-03-13 15:11:25 +01:00
Julius Unverfehrt
46157031b5 Pull request #59: adjust response headers
Merge in RR/pyinfra from RED-6118-multi-tenancy-patch to master

Squashed commit of the following:

commit 02e471622e59baf5d2bb5c61980cea43ca1c6d61
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Feb 16 16:36:19 2023 +0100

    move acknowledgment function to outer scope

commit f9efffd8e6d90d5e371c66574b1afe361a1da146
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Feb 16 16:04:07 2023 +0100

    adjust response headers

    - change response formatting: only forward the
    request message headers instead of all properties
    - adjust build script to only increase patch
    version on master push
2023-02-16 16:37:57 +01:00
Julius Unverfehrt
c97ae3d2c2 Pull request #56: RED-6118 multi tenancy
Merge in RR/pyinfra from RED-6118-multi-tenancy to master

Squashed commit of the following:

commit 0a1301f9d7a12a1097e6bf9a1bb0a94025312d0a
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Feb 16 09:12:54 2023 +0100

    delete (for now) not needed exception module

commit 9b624f9c95c129bf186eaea8405a14d359ccb1ae
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Thu Feb 16 09:08:57 2023 +0100

    implement message properties forwarding

    - revert tenant validation logic since this functionality is not wanted
    - implement request message properties forwarding to response message.
    Thus, all message headers including x-tenant-id are present in the
    response.

commit ddac812d32eeec09d9434c32595875eb354767f8
Merge: ed4b495 6828c65
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Wed Feb 15 17:00:54 2023 +0100

    Merge branch 'master' of ssh://git.iqser.com:2222/rr/pyinfra into RED-6118-multi-tenancy

commit ed4b4956c6cb6d201fc29b0318078dfb8fa99006
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Wed Feb 15 10:00:28 2023 +0100

    refactor

commit 970fd72aa73ace97d36f129031fb143209c5076b
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Feb 14 17:22:54 2023 +0100

    RED-6118 make pyinfra multi-tenant ready

    - refactor message validation logic
    - add tenant validation step:
    	- messages without header/tenant id are accepted for now, until
    	  multi-tenancy is implemented in backend
    	- only valid tenant is 'redaction'

commit 0f04e799620e01b3346eeaf86f3e941830824202
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Feb 14 15:42:28 2023 +0100

    add dev scripts

    - add scripts to ease pyinfra development by allowing to run pyinfra
    locally with callback mock and publishing script.
2023-02-16 09:44:43 +01:00
Francisco Schulz
6828c65396 Pull request #58: fix version conflict
Merge in RR/pyinfra from hotfix/version-conflict-during-build to master

* commit '73bfef686782112d448469ec14d84cab5965f318':
  increment version
2023-02-15 16:14:55 +01:00
Francisco Schulz
73bfef6867 increment version 2023-02-15 16:12:22 +01:00
Francisco Schulz
1af171bd3f Pull request #57: Bugfix/RED-5277 investigate missing heartbeat error
Merge in RR/pyinfra from bugfix/RED-5277-investigate-missing-heartbeat-error to master

Squashed commit of the following:

commit 9e139e79e46c52014986f9afb2c6534281b55c10
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Wed Feb 15 14:56:44 2023 +0100

    RED-5277: Moved async processing to its own functions

commit 244a941299dbf75b254adcad8b068b2917c6bf79
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Wed Feb 15 11:26:00 2023 +0100

    Revert "only set git tag on release and master branches"

    This reverts commit 9066856d223f0646723fa1c62c444e16a9bb3ce9.

commit adb35db6fa6daf4b79263a918716c34905e8b3bc
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Wed Feb 15 11:11:07 2023 +0100

    increment version

commit 9066856d223f0646723fa1c62c444e16a9bb3ce9
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Wed Feb 15 11:10:49 2023 +0100

    only set git tag on release and master branches

commit ee11e018efdbc63a740008e7fa2415cbb12476ae
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Wed Feb 15 10:18:08 2023 +0100

    configure root logger in `__init__.py`
    only set log levels for other loggers, inherit config

commit 776399912ddf1e936138cceb2af981f27d333823
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Wed Feb 15 10:16:57 2023 +0100

    update dependency via `poetry update`

commit 804a8d9fbd1ded3e154fe9b3cafa32428522ca0f
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Wed Feb 15 10:16:25 2023 +0100

    increment version

commit cf057daed23d5f5b0f6f3a1a31e956e015e86368
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 17:59:55 2023 +0100

    update

commit 51717d85fce592b8bf38a8b5235faa04379cce1a
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 17:48:51 2023 +0100

    define sonar source

commit ace57c211a61d8e473a700da161806f882b19dc6
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 17:46:24 2023 +0100

    update plan

commit 1fcc00eb18ed692e2646873b4a233a00b5f6d93b
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 17:46:13 2023 +0100

    fix typo

commit 20b59768a68d985e1bf2fe6f93a1e6283bac5cb0
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 17:43:39 2023 +0100

    increment version

commit 8e7b4bf302b5591b2c490ad89c8a01a87c5b4741
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 17:11:59 2023 +0100

    get rid of extra logger

commit 3fd3eb255c252d1e208b88b475ec8a07c521619d
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 16:45:56 2023 +0100

    increment version

commit b0b5e5ebd94554cdafed6cff333d73a9ba08bea1
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 16:40:22 2023 +0100

    update

commit b87b3c351722d6949833c397178bc0354c754d90
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 16:38:41 2023 +0100

    fix tag issue from build

commit 73f3dcb280b6f905eeef3c69123b1252e6c934b1
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 14:21:57 2023 +0100

    add comments & update logging

commit 72a9e2c51f5bf98fc9f0803183fc1d28aaea9e35
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 12:06:09 2023 +0100

    cleanup comments

commit 587814944921f0f148e4d3c4c76d4edffff55bba
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 11:16:17 2023 +0100

    use thread executor in a `with` statement

commit 9561a6b447d98d2f0d536f63c0946d7bf1e2ca7d
Author: Francisco Schulz <Francisco.Schulz@iqser.com>
Date:   Tue Feb 14 10:42:49 2023 +0100

    fix unbound issue `callback_result` & shutdown thread executor

... and 23 more commits
2023-02-15 16:02:17 +01:00
Francisco Schulz
61efbdaffd Pull request #55: add master to non-dev branches
Merge in RR/pyinfra from bugfix/add-master-to-non-dev-branches to master

* commit 'c94604cc666ec4a9d3803c949f228cbf4291aaf2':
  add master to non-dev branches
2022-11-16 10:44:43 +01:00
Francisco Schulz
c94604cc66 add master to non-dev branches 2022-11-16 10:35:33 +01:00
Francisco Schulz
edbe5fa4f0 Pull request #54: Feature/MLOPS-32 update pyinfra to use pypoetry.toml
Merge in RR/pyinfra from feature/MLOPS-32-update-pyinfra-to-use-pypoetry.toml to master

* commit '37d8ee49a22ab9ee81792217404ed0a7daea65c2': (34 commits)
  add convenience command for version updates
  testing version is ahead in project
  test equal version number
  echo latest git version tag
  update tag fetching
  rollback
  testing hardcoded
  remove specific planRepository
  remove parentheses
  change project key
  add planRepositories config
  fix typo: licence -> license
  ignore bamboo YAML configs
  switch back to bamboo Java config
  update version tag manually
  remove superfluous `then`
  isolate feature/bugfix/hotfix and dev tag setting
  fix script `echo` was missing
  add version update shortcut
  show pyproject.toml file
  ...
2022-11-15 16:03:59 +01:00
Francisco Schulz
37d8ee49a2 add convenience command for version updates 2022-11-15 15:56:54 +01:00
Francisco Schulz
7732e884c5 testing version is ahead in project 2022-11-15 15:56:35 +01:00
Francisco Schulz
40e516b4e8 test equal version number 2022-11-15 15:47:32 +01:00
Francisco Schulz
c8c0210945 echo latest git version tag 2022-11-15 15:41:12 +01:00
Francisco Schulz
280b14b4a0 update tag fetching 2022-11-15 15:13:19 +01:00
Kevin Tumma
203c0f669c rollback 2022-11-15 14:14:12 +01:00
Kevin Tumma
8227e18580 testing hardcoded 2022-11-15 14:07:23 +01:00
Francisco Schulz
73bb38f917 remove specific planRepository 2022-11-15 13:18:25 +01:00
Francisco Schulz
fa76003983 remove parentheses 2022-11-15 11:40:16 +01:00
Francisco Schulz
19540c7c08 change project key 2022-11-15 11:39:08 +01:00
Francisco Schulz
2fe4a75a57 add planRepositories config 2022-11-15 11:37:40 +01:00
Francisco Schulz
b5deb7b292 fix typo: licence -> license 2022-11-15 09:28:41 +01:00
Francisco Schulz
5cd30c08b3 ignore bamboo YAML configs 2022-11-15 09:00:46 +01:00
Francisco Schulz
17cbbeb620 switch back to bamboo Java config 2022-11-15 08:59:47 +01:00
Francisco Schulz
9c4cf3d220 update version tag manually 2022-11-14 17:04:30 +01:00
Francisco Schulz
3ccb1d2370 remove superfluous then 2022-11-14 16:26:19 +01:00
Francisco Schulz
398b1c271f isolate feature/bugfix/hotfix and dev tag setting 2022-11-14 16:20:16 +01:00
Francisco Schulz
05658784be fix script
`echo` was missing
2022-11-14 15:49:44 +01:00
Francisco Schulz
974df96bb9 add version update shortcut 2022-11-14 15:49:17 +01:00
Francisco Schulz
ca3f812527 show pyproject.toml file 2022-11-14 15:38:10 +01:00
Francisco Schulz
d78e6c45fb separate git-tag into own stage 2022-11-14 15:37:56 +01:00
Francisco Schulz
41220d3c80 increment version 2022-11-14 10:39:36 +01:00
Francisco Schulz
2d2e72c86e remove redundancies 2022-11-14 10:39:30 +01:00
Francisco Schulz
37b0280ab6 update tag logic 2022-11-10 17:38:18 +01:00
Francisco Schulz
4bd6ee867f fix circumflex formatting 2022-11-10 16:52:11 +01:00
Francisco Schulz
b0efed4007 update regex 2022-11-10 16:45:32 +01:00
Francisco Schulz
28ee14e92f echo bamboo vars 2022-11-10 16:04:38 +01:00
Francisco Schulz
fb3d4b5fc9 add sonar config 2022-11-10 15:25:44 +01:00
Francisco Schulz
84351fd75c fix formatting issue 2022-11-10 13:24:51 +01:00
Francisco Schulz
244aaec470 use inline config-keys script opposed to file 2022-11-10 13:21:38 +01:00
Francisco Schulz
18d614f61c use bamboo config YAML 2022-11-10 13:17:01 +01:00
Francisco Schulz
7472939f21 ignore checks for bamboo.yml
otherwise check-yaml throws multi-file exception
2022-11-10 13:16:54 +01:00
Francisco Schulz
a819e60632 update 2022-11-10 13:09:09 +01:00
Francisco Schulz
05d5582479 convert into python package
- remove build specs
- move pytest.ini into pyproject.toml
- update readme
- add pre-commit config
- run formatters
- add Makefile
2022-11-03 16:10:12 +01:00
Francisco Schulz
64d6a8cec6 Pull request #53: Feature/MLOPS-23 pyinfra does not use the value of the storage azurecontainername environment
Merge in RR/pyinfra from feature/MLOPS-23-pyinfra-does-not-use-the-value-of-the-storage_azurecontainername-environment to master

* commit '7a740403bb65db97c8e4cb54de00aac3536b2e4c':
  update
  update test config
  update test config
  add pytests to check if a configured bucket can be found
  add submodule initialization
  load different env vars for the variable depending on the set
2022-10-13 15:09:53 +02:00
Francisco Schulz
7a740403bb update 2022-10-13 14:04:00 +02:00
Francisco Schulz
b2cd529519 update test config 2022-10-13 13:53:53 +02:00
Francisco Schulz
1891519e19 update test config 2022-10-13 13:17:31 +02:00
Francisco Schulz
843d91c61a add pytests to check if a configured bucket can be found 2022-10-13 11:26:58 +02:00
Francisco Schulz
5b948fdcc5 add submodule initialization 2022-10-13 11:01:29 +02:00
Francisco Schulz
bb5b73e189 load different env vars for the variable depending on the set 2022-10-13 10:29:57 +02:00
Viktor Seifert
94beb544fa Pull request #52: RED-5324: Added missing storage-region to key to config and to minio-client creation so that storage access works on s3
Merge in RR/pyinfra from RED-5324 to master

* commit 'ffaa4a668b447fe3f4708d99ec6fccec14f85693':
  RED-5324: Added missing storage-region to key to config and to minio-client creation so that storage access works on s3
2022-09-30 15:15:47 +02:00
Viktor Seifert
ffaa4a668b RED-5324: Added missing storage-region to key to config and to minio-client creation so that storage access works on s3 2022-09-30 15:12:58 +02:00
Julius Unverfehrt
88b4c5c7ce Pull request #51: RED-5009 pyinfra now truly rejects messages that couldn't be processed by the callback
Merge in RR/pyinfra from RED-5009-fix-ack-bug to master

Squashed commit of the following:

commit 7b00edf6fe1167345e774d658fcd2e60c01d05d5
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Wed Aug 24 14:52:57 2022 +0200

    RED-5009 pyinfra now truly rejects messages that couldn't be processed by the callback (e.g. inobtainable storage file)
2022-08-24 15:00:00 +02:00
Viktor Seifert
71ad2af4eb Pull request #50: RED-5009: Changed callback to not process redelivered messages to prevent endless retries
Merge in RR/pyinfra from RED-5009 to master

Squashed commit of the following:

commit 1f8114379bdeb3af8640c71c2edde2a672bb358c
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Mon Aug 22 16:55:04 2022 +0200

    RED-5009: Added the possibility for a callback to signal that a message should be declined/dead-lettered

commit be674c2915f6f149c581bc2fe2783217fe424df8
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Fri Aug 19 16:26:38 2022 +0200

    RED-5009: Changed callback to not process redelivered messages to prevent endless retries
2022-08-23 10:22:13 +02:00
Julius Unverfehrt
be82114f83 Pull request #49: Add exists to storage
Merge in RR/pyinfra from add-exists-to-storage to master

Squashed commit of the following:

commit 48d5e1c9e103702bfebfc115e811576514e115c3
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Aug 12 13:32:40 2022 +0200

    refactor

commit 711d2c8dbf7c78e26133e3ea3a57670fe829059b
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Fri Aug 12 11:45:42 2022 +0200

    add method to check if objects exists for azure and s3
2022-08-12 13:35:12 +02:00
Viktor Seifert
0f6512df54 Pull request #48: RED-4653: Changed token-file path to the temp dir
Merge in RR/pyinfra from RED-4653 to master

* commit '8b050fe9b16cbea37b4becf7de54b25a9a4dbf63':
  RED-4653: Changed token-file path to the temp dir
2022-08-02 10:52:42 +02:00
Viktor Seifert
8b050fe9b1 RED-4653: Changed token-file path to the temp dir 2022-08-02 10:44:56 +02:00
Viktor Seifert
046f26d0e9 Pull request #47: RED-4653
Merge in RR/pyinfra from RED-4653 to master

* commit '7e2cb20040a6be7510f5b06b0a522c1b044d5ee3':
  RED-4653: Corrected if-operator
  RED-4653: Added value to config to prevent writing the token as a default since that is only useful in a container
  RED-4653: Implemented a startup probe for k8s
2022-08-02 09:59:35 +02:00
Viktor Seifert
7e2cb20040 RED-4653: Corrected if-operator 2022-08-01 17:38:37 +02:00
Viktor Seifert
8867da3557 RED-4653: Added value to config to prevent writing the token as a default since that is only useful in a container 2022-08-01 17:00:44 +02:00
Viktor Seifert
eed5912516 RED-4653: Implemented a startup probe for k8s 2022-08-01 16:19:13 +02:00
Viktor Seifert
3ccc4a1547 Pull request #46: RED-4653
Merge in RR/pyinfra from RED-4653 to master

* commit '0efbd2c98cecaa1e33991473b1b120827df60ae9':
  RED-4653: Removed unnecessary string formatting
  RED-4653: Reordered code to prevent errors on application shutdown
  RED-4653: Changed code to close only the connection instead of the channel & connection to see if that is sufficient for a clean shutdown
  RED-4653: Added some debugging code to test if closing the connection needed
  RED-4653: Corrected exception block to not swallow exceptions
  RED-4653: Switch to closing channel instead of only cancelling subscription on shutdown.
  RED-4653: Corrected signal handler by correctly handling passed params
2022-08-01 14:29:35 +02:00
Viktor Seifert
0efbd2c98c RED-4653: Removed unnecessary string formatting 2022-08-01 14:15:03 +02:00
Viktor Seifert
89ce61996c RED-4653: Reordered code to prevent errors on application shutdown 2022-08-01 14:08:58 +02:00
Viktor Seifert
2cffab279d RED-4653: Changed code to close only the connection instead of the channel & connection to see if that is sufficient for a clean shutdown 2022-08-01 13:39:09 +02:00
Viktor Seifert
76985e83ed RED-4653: Added some debugging code to test if closing the connection needed 2022-08-01 13:25:53 +02:00
Viktor Seifert
bbf013385a RED-4653: Corrected exception block to not swallow exceptions 2022-08-01 13:17:01 +02:00
Viktor Seifert
5cdf4df4a3 RED-4653: Switch to closing channel instead of only cancelling subscription on shutdown.
Changed queue-consumption shutdown to close the channel before closing the connection, since only cancelling the consumers doesn't clean-up the channel correctly, which in turn can cause an error when closing the connection.  Also reordered the code so that the connection and channel are only opened when queue-consumption starts.
2022-08-01 12:25:27 +02:00
Viktor Seifert
fc1f23a24d RED-4653: Corrected signal handler by correctly handling passed params 2022-08-01 11:26:15 +02:00
Isaac Riley
6c2652837a Pull request #45: clean up config hygiene; align queue manager and storage signature
Merge in RR/pyinfra from tidy_up to master

* commit 'db8f617aa78698760e5aaa198445d349755366a1':
  clean up config hygiene; align queue manager and storage signature
2022-07-26 15:12:36 +02:00
Isaac Riley
db8f617aa7 clean up config hygiene; align queue manager and storage signature 2022-07-26 14:56:21 +02:00
Viktor Seifert
e3abf2be0f Pull request #44: RED-4653
Merge in RR/pyinfra from RED-4653 to master

Squashed commit of the following:

commit 14ed6d2ee79f9a6bc4bad187dc775f7476a05d97
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Tue Jul 26 11:08:16 2022 +0200

    RED-4653: Disabled coverage check since there not tests at the moment

commit e926631b167d03e8cc0867db5b5c7d44d6612dcf
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Tue Jul 26 10:58:50 2022 +0200

    RED-4653: Re-added test execution scripts

commit 94648cc449bbc392864197a1796f99f8953b7312
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Tue Jul 26 10:50:42 2022 +0200

    RED-4653: Changed error case for processing messages to not requeue the message since that will be handled in DLQ logic

commit d77982dfedcec49482293d79818283c8d7a17dc7
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Tue Jul 26 10:46:32 2022 +0200

    RED-4653: Removed unnecessary logging message

commit 8c00fd75bf04f8ecc0e9cda654f8e053d4cfb66f
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Tue Jul 26 10:03:35 2022 +0200

    RED-4653: Re-added wrongly removed config

commit 759d72b3fa093b19f97e68d17bf53390cd5453c7
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Tue Jul 26 09:57:47 2022 +0200

    RED-4653: Removed leftover Docker commands

commit 2ff5897ee38e39d6507278b6a82176be2450da16
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Tue Jul 26 09:48:08 2022 +0200

    RED-4653: Removed leftover Docker config

commit 1074167aa98f9f59c0f0f534ba2f1ba09ffb0958
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Tue Jul 26 09:41:21 2022 +0200

    RED-4653: Removed Docker build stage since it is not needed for a project that is used as a Python module

commit ec769c6cd74a74097d8ebe4800ea6e2ea86236cc
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Mon Jul 25 16:11:50 2022 +0200

    RED-4653: Renamed function for better clarity and consistency

commit 96e8ac4316ac57aac90066f35422d333c532513b
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Mon Jul 25 15:07:40 2022 +0200

    RED-4653: Added code to cancel the queue subscription on application exit to queue manager so that it can exit gracefully

commit 64d8e0bd15730898274c08d34f9c34fbac559422
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Mon Jul 25 13:57:06 2022 +0200

    RED-4653: Moved queue cancellation to a separate method so that it can be called on application exit

commit aff1d06364f5694c5922f37d961e401c12243221
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Mon Jul 25 11:51:16 2022 +0200

    RED-4653: Re-ordered message processing so that ack occurs after publishing the result, to prevent message loss

commit 9339186b86f2fe9653366c22fcdc9f7fc096b138
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Fri Jul 22 18:07:25 2022 +0200

    RED-4653: RED-4653: Reordered code to acknowledge message before publishing a result message

commit 2d6fe1cbd95cd86832b086c6dfbcfa62b3ffa16f
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Fri Jul 22 17:00:04 2022 +0200

    RED-4653: Hopefully corrected storage bucket env var name

commit 8f1ef0dd5532882cb12901721195d9acb336286c
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Fri Jul 22 16:37:27 2022 +0200

    RED-4653: Switched to validating the connection url via a regex since the validators lib parses our endpoints incorrectly

commit 8d0234fcc5ff7ed1ae7695a17856c6af050065bd
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Fri Jul 22 15:02:54 2022 +0200

    RED-4653: Corrected exception creation

commit 098a62335b3b695ee409363d429ac07284de7138
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Fri Jul 22 14:42:22 2022 +0200

    RED-4653: Added a descriptive error message when the storage endpoint is nor a correct url

commit 379685f964a4de641ce6506713f1ea8914a3f5ab
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Fri Jul 22 14:11:48 2022 +0200

    RED-4653: Removed variable re-use to make the code clearer

commit 4bf1a023453635568e16b1678ef5ad994c534045
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Thu Jul 21 17:41:55 2022 +0200

    RED-4653: Added explicit conversion of the heartbeat config value to an int before passing it to pika

commit 8f2bc4e028aafdef893458d1433a05724f534fce
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Mon Jul 18 16:41:31 2022 +0200

    RED-4653: Set heartbeat to lower value so that disconnects are detected more quickly

... and 6 more commits
2022-07-26 13:15:07 +02:00
Julius Unverfehrt
3f645484d9 Pull request #41: RED-4564
Merge in RR/pyinfra from RED-4564 to master

Squashed commit of the following:

commit bf4c85a0ab9fed19a44508f2cbef6858cbb32259
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Fri Jul 8 15:46:11 2022 +0200

    RED-4564: POC-test to see if cancelling the consumer prevents messages from being stuck

commit 12ebd186b220f263ac2275463b0c124e8f4210fc
Author: Viktor Seifert <viktor.seifert@iqser.com>
Date:   Fri Jul 8 14:32:05 2022 +0200

    RED-4564: Print full exception with traceback when processing from the queue
2022-07-11 11:28:18 +02:00
103 changed files with 37461 additions and 2345 deletions


@ -1,106 +0,0 @@
data
/build_venv/
/.venv/
/misc/
/incl/image_service/test/
/scratch/
/bamboo-specs/
README.md
Dockerfile
*idea
*misc
*egg-innfo
*pycache*
# Git
.git
.gitignore
# CI
.codeclimate.yml
.travis.yml
.taskcluster.yml
# Docker
.docker
# Byte-compiled / optimized / DLL files
__pycache__/
*/__pycache__/
*/*/__pycache__/
*/*/*/__pycache__/
*.py[cod]
*/*.py[cod]
*/*/*.py[cod]
*/*/*/*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/**
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Virtual environment
.env/
.venv/
#venv/
# PyCharm
.idea
# Python mode for VIM
.ropeproject
*/.ropeproject
*/*/.ropeproject
*/*/*/.ropeproject
# Vim swap files
*.swp
*/*.swp
*/*/*.swp
*/*/*/*.swp

2
.dvc/.gitignore vendored Normal file

@ -0,0 +1,2 @@
/config.local
/cache

5
.dvc/config Normal file

@ -0,0 +1,5 @@
[core]
remote = azure
['remote "azure"']
url = azure://pyinfra-dvc
connection_string =

3
.dvcignore Normal file

@ -0,0 +1,3 @@
# Add patterns of files dvc should ignore, which could improve
# the performance. Learn more at
# https://dvc.org/doc/user-guide/dvcignore

57
.gitignore vendored

@ -1,10 +1,53 @@
# Environments
.env
.venv
__pycache__
data/
env/
venv/
.DS_Store
# Project folders
*.vscode/
.idea
*_app
*pytest_cache
*joblib
*tmp
*profiling
*logs
*docker
*drivers
*bamboo-specs/target
.coverage
data
build_venv
reports
pyinfra.egg-info
bamboo-specs/target
.pytest_cache
/.coverage
.idea
# Python specific files
__pycache__/
*.py[cod]
*.ipynb
*.ipynb_checkpoints
# file extensions
*.log
*.csv
*.pkl
*.profile
*.cbm
*.egg-info
# temp files
*.swp
*~
*.un~
# keep files
!notebooks/*.ipynb
# keep folders
!secrets
!data/*
!drivers
# ignore files
bamboo.yml

23
.gitlab-ci.yml Normal file

@ -0,0 +1,23 @@
# CI for services, check gitlab repo for python package CI
include:
- project: "Gitlab/gitlab"
ref: main
file: "/ci-templates/research/python_pkg-test-build-release.gitlab-ci.yml"
# set project variables here
variables:
NEXUS_PROJECT_DIR: research # subfolder in Nexus docker-gin where your container will be stored
IMAGENAME: $CI_PROJECT_NAME # if the project URL is gitlab.example.com/group-name/project-1, CI_PROJECT_NAME is project-1
REPORTS_DIR: reports
FF_USE_FASTZIP: "true" # enable fastzip - a faster zip implementation that also supports level configuration.
ARTIFACT_COMPRESSION_LEVEL: default # can also be set to fastest, fast, slow and slowest. If just enabling fastzip is not enough try setting this to fastest or fast.
CACHE_COMPRESSION_LEVEL: default # same as above, but for caches
# TRANSFER_METER_FREQUENCY: 5s # will display transfer progress every 5 seconds for artifacts and remote caches. For debugging purposes.
############
# UNIT TESTS
unit-tests:
variables:
###### UPDATE/EDIT ######
UNIT_TEST_DIR: "tests/unit_test"

55
.pre-commit-config.yaml Normal file

@ -0,0 +1,55 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
exclude: ^(docs/|notebooks/|data/|src/configs/|tests/|.hooks/)
default_language_version:
python: python3.10
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
name: Check Gitlab CI (unsafe)
args: [--unsafe]
files: .gitlab-ci.yml
- id: check-yaml
exclude: .gitlab-ci.yml
- id: check-toml
- id: detect-private-key
- id: check-added-large-files
args: ['--maxkb=10000']
- id: check-case-conflict
- id: mixed-line-ending
- repo: https://github.com/pre-commit/mirrors-pylint
rev: v3.0.0a5
hooks:
- id: pylint
language: system
args:
- --disable=C0111,R0903
- --max-line-length=120
- repo: https://github.com/pre-commit/mirrors-isort
rev: v5.10.1
hooks:
- id: isort
args:
- --profile black
- repo: https://github.com/psf/black
rev: 24.10.0
hooks:
- id: black
# exclude: ^(docs/|notebooks/|data/|src/secrets/)
args:
- --line-length=120
- repo: https://github.com/compilerla/conventional-pre-commit
rev: v3.6.0
hooks:
- id: conventional-pre-commit
pass_filenames: false
stages: [commit-msg]
# args: [] # optional: list of Conventional Commits types to allow e.g. [feat, fix, ci, chore, test]

1
.python-version Normal file

@ -0,0 +1 @@
3.10


@ -1,19 +0,0 @@
FROM python:3.8
# Use a virtual environment.
RUN python -m venv /app/venv
ENV PATH="/app/venv/bin:$PATH"
# Upgrade pip.
RUN python -m pip install --upgrade pip
# Make a directory for the service files and copy the service repo into the container.
WORKDIR /app/service
COPY . .
# Install module & dependencies
RUN python3 -m pip install -e .
RUN python3 -m pip install -r requirements.txt
# Run the service loop.
CMD ["python", "src/serve.py"]


@ -1,19 +0,0 @@
ARG BASE_ROOT="nexus.iqser.com:5001/red/"
ARG VERSION_TAG="dev"
FROM ${BASE_ROOT}pyinfra:${VERSION_TAG}
EXPOSE 5000
EXPOSE 8080
RUN python3 -m pip install coverage
# Make a directory for the service files and copy the service repo into the container.
WORKDIR /app/service
COPY . .
# Install module & dependencies
RUN python3 -m pip install -e .
RUN python3 -m pip install -r requirements.txt
CMD coverage run -m pytest test/ -x && coverage report -m && coverage xml

85
Makefile Normal file

@ -0,0 +1,85 @@
.PHONY: \
poetry in-project-venv dev-env use-env install install-dev tests \
update-version sync-version-with-git \
docker docker-build-run docker-build docker-run \
docker-rm docker-rm-container docker-rm-image \
pre-commit get-licenses prep-commit \
docs sphinx_html sphinx_apidoc
.DEFAULT_GOAL := run
export DOCKER=docker
export DOCKERFILE=Dockerfile
export IMAGE_NAME=rule_engine-image
export CONTAINER_NAME=rule_engine-container
export HOST_PORT=9999
export CONTAINER_PORT=9999
export PYTHON_VERSION=python3.8
# all commands should be executed in the root dir or the project,
# specific environments should be deactivated
poetry: in-project-venv use-env dev-env
in-project-venv:
poetry config virtualenvs.in-project true
use-env:
poetry env use ${PYTHON_VERSION}
dev-env:
poetry install --with dev
install:
poetry add $(pkg)
install-dev:
poetry add --dev $(pkg)
requirements:
poetry export --without-hashes --output requirements.txt
update-version:
poetry version prerelease
sync-version-with-git:
git pull -p && poetry version $(git rev-list --tags --max-count=1 | git describe --tags --abbrev=0)
docker: docker-rm docker-build-run
docker-build-run: docker-build docker-run
docker-build:
$(DOCKER) build \
--no-cache --progress=plain \
-t $(IMAGE_NAME) -f $(DOCKERFILE) .
docker-run:
$(DOCKER) run -it --rm -p $(HOST_PORT):$(CONTAINER_PORT)/tcp --name $(CONTAINER_NAME) $(IMAGE_NAME) python app.py
docker-rm: docker-rm-container docker-rm-image
docker-rm-container:
-$(DOCKER) rm $(CONTAINER_NAME)
docker-rm-image:
-$(DOCKER) image rm $(IMAGE_NAME)
tests:
poetry run pytest ./tests
prep-commit:
docs get-license sync-version-with-git update-version pre-commit
pre-commit:
pre-commit run --all-files
get-licenses:
pip-licenses --format=json --order=license --with-urls > pkg-licenses.json
docs: sphinx_apidoc sphinx_html
sphinx_html:
poetry run sphinx-build -b html docs/source/ docs/build/html -E -a
sphinx_apidoc:
poetry run sphinx-apidoc -o ./docs/source/modules ./src/rule_engine

241
README.md

@ -1,105 +1,220 @@
# Infrastructure to deploy Research Projects
# PyInfra
The Infrastructure expects to be deployed in the same Pod / local environment as the analysis container and handles all outbound communication.
1. [ About ](#about)
2. [ Configuration ](#configuration)
3. [ Queue Manager ](#queue-manager)
4. [ Module Installation ](#module-installation)
5. [ Scripts ](#scripts)
6. [ Tests ](#tests)
7. [ Opentelemetry protobuf dependency hell ](#opentelemetry-protobuf-dependency-hell)
## About
Shared library for the research team, containing code related to infrastructure and communication with other services.
Offers a simple interface for processing data and sending responses via AMQP, monitoring via Prometheus and storage
access via S3 or Azure. It also exports traces via OpenTelemetry for queue messages and webserver requests.
To start, see the [complete example](pyinfra/examples.py) which shows how to use all features of the service and can be
imported and used directly for default research service pipelines (data ID in message, download data from storage,
upload result while offering Prometheus monitoring, /health and /ready endpoints and multi tenancy support).
## Configuration
A configuration is located in `/config.yaml`. All relevant variables can be configured via exporting environment variables.
Configuration is done via `Dynaconf`. This means that you can use environment variables, a `.env` file or `.toml`
file(s) to configure the service. You can also combine these methods. The precedence is
`environment variables > .env > .toml`. It is recommended to load settings with the provided
[`load_settings`](pyinfra/config/loader.py) function, which you can combine with the provided
[`parse_settings_path`](pyinfra/config/loader.py) function. This allows you to load settings from a `.toml` file or a folder with
`.toml` files and override them with environment variables.
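For example, a minimal sketch of this precedence (the file layout, key name and single validator are illustrative, not part of pyinfra's required settings):

```python
import os

from dynaconf import Validator

from pyinfra.config.loader import load_settings

# Assume config/settings.toml contains:
#   [logging]
#   level = "INFO"
# Environment variables win over .env and .toml values.
os.environ["LOGGING__LEVEL"] = "DEBUG"

settings = load_settings(
    settings_path="config/",  # folder with .toml files, relative to ROOT_PATH
    validators=[Validator("logging.level", must_exist=True, is_type_of=str)],
)
assert settings.logging.level == "DEBUG"
```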
| Environment Variable | Default | Description |
|-------------------------------|--------------------------------|-----------------------------------------------------------------------|
| LOGGING_LEVEL_ROOT | DEBUG | Logging level for service logger |
| PROBING_WEBSERVER_HOST | "0.0.0.0" | Probe webserver address |
| PROBING_WEBSERVER_PORT | 8080 | Probe webserver port |
| PROBING_WEBSERVER_MODE | production | Webserver mode: {development, production} |
| RABBITMQ_HOST | localhost | RabbitMQ host address |
| RABBITMQ_PORT | 5672 | RabbitMQ host port |
| RABBITMQ_USERNAME | user | RabbitMQ username |
| RABBITMQ_PASSWORD | bitnami | RabbitMQ password |
| RABBITMQ_HEARTBEAT | 7200 | Controls AMQP heartbeat timeout in seconds |
| REQUEST_QUEUE | request_queue | Requests to service |
| RESPONSE_QUEUE | response_queue | Responses by service |
| DEAD_LETTER_QUEUE | dead_letter_queue | Messages that failed to process |
| ANALYSIS_ENDPOINT | "http://127.0.0.1:5000" | Endpoint for analysis container |
| STORAGE_BACKEND | s3 | The type of storage to use {s3, azure} |
| STORAGE_BUCKET | "pyinfra-test-bucket" | The bucket / container to pull files specified in queue requests from |
| STORAGE_ENDPOINT | "http://127.0.0.1:9000" | Endpoint for s3 storage |
| STORAGE_KEY | root | User for s3 storage |
| STORAGE_SECRET | password | Password for s3 storage |
| STORAGE_AZURECONNECTIONSTRING | "DefaultEndpointsProtocol=..." | Connection string for Azure storage |
The following table shows all necessary settings. You can find a preconfigured settings file for this service in
Bitbucket. These are the complete settings; you only need all of them if you use all features of the service as
described in the [complete example](pyinfra/examples.py).
## Response Format
| Environment Variable | Internal / .toml Name | Description |
| ------------------------------------------ | --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| LOGGING\_\_LEVEL | logging.level | Log level |
| DYNAMIC_TENANT_QUEUES\_\_ENABLED | dynamic_tenant_queues.enabled | Enable queues per tenant that are dynamically created mode |
| METRICS\_\_PROMETHEUS\_\_ENABLED | metrics.prometheus.enabled | Enable Prometheus metrics collection |
| METRICS\_\_PROMETHEUS\_\_PREFIX | metrics.prometheus.prefix | Prefix for Prometheus metrics (e.g. {product}-{service}) |
| WEBSERVER\_\_HOST | webserver.host | Host of the webserver (offering e.g. /prometheus, /ready and /health endpoints) |
| WEBSERVER\_\_PORT | webserver.port | Port of the webserver |
| RABBITMQ\_\_HOST | rabbitmq.host | Host of the RabbitMQ server |
| RABBITMQ\_\_PORT | rabbitmq.port | Port of the RabbitMQ server |
| RABBITMQ\_\_USERNAME | rabbitmq.username | Username for the RabbitMQ server |
| RABBITMQ\_\_PASSWORD | rabbitmq.password | Password for the RabbitMQ server |
| RABBITMQ\_\_HEARTBEAT | rabbitmq.heartbeat | Heartbeat for the RabbitMQ server |
| RABBITMQ\_\_CONNECTION_SLEEP | rabbitmq.connection_sleep | Sleep time intervals during message processing. Has to be a divider of heartbeat, and shouldn't be too big, since only in these intervals queue interactions happen (like receiving new messages) This is also the minimum time the service needs to process a message. |
| RABBITMQ\_\_INPUT_QUEUE | rabbitmq.input_queue | Name of the input queue in single queue setting |
| RABBITMQ\_\_OUTPUT_QUEUE | rabbitmq.output_queue | Name of the output queue in single queue setting |
| RABBITMQ\_\_DEAD_LETTER_QUEUE | rabbitmq.dead_letter_queue | Name of the dead letter queue in single queue setting |
| RABBITMQ\_\_TENANT_EVENT_QUEUE_SUFFIX | rabbitmq.tenant_event_queue_suffix | Suffix for the tenant event queue in multi tenant/queue setting |
| RABBITMQ\_\_TENANT_EVENT_DLQ_SUFFIX | rabbitmq.tenant_event_dlq_suffix | Suffix for the dead letter queue in multi tenant/queue setting |
| RABBITMQ\_\_TENANT_EXCHANGE_NAME | rabbitmq.tenant_exchange_name | Name of tenant exchange in multi tenant/queue setting |
| RABBITMQ\_\_QUEUE_EXPIRATION_TIME | rabbitmq.queue_expiration_time | Time until queue expiration in multi tenant/queue setting |
| RABBITMQ\_\_SERVICE_REQUEST_QUEUE_PREFIX | rabbitmq.service_request_queue_prefix | Service request queue prefix in multi tenant/queue setting |
| RABBITMQ\_\_SERVICE_REQUEST_EXCHANGE_NAME | rabbitmq.service_request_exchange_name | Service request exchange name in multi tenant/queue setting |
| RABBITMQ\_\_SERVICE_RESPONSE_EXCHANGE_NAME | rabbitmq.service_response_exchange_name | Service response exchange name in multi tenant/queue setting |
| RABBITMQ\_\_SERVICE_DLQ_NAME | rabbitmq.service_dlq_name | Service dead letter queue name in multi tenant/queue setting |
| STORAGE\_\_BACKEND | storage.backend | Storage backend to use (currently only "s3" and "azure" are supported) |
| STORAGE\_\_S3\_\_BUCKET | storage.s3.bucket | Name of the S3 bucket |
| STORAGE\_\_S3\_\_ENDPOINT | storage.s3.endpoint | Endpoint of the S3 server |
| STORAGE\_\_S3\_\_KEY | storage.s3.key | Access key for the S3 server |
| STORAGE\_\_S3\_\_SECRET | storage.s3.secret | Secret key for the S3 server |
| STORAGE\_\_S3\_\_REGION | storage.s3.region | Region of the S3 server |
| STORAGE\_\_AZURE\_\_CONTAINER | storage.azure.container_name | Name of the Azure container |
| STORAGE\_\_AZURE\_\_CONNECTION_STRING | storage.azure.connection_string | Connection string for the Azure server |
| STORAGE\_\_TENANT_SERVER\_\_PUBLIC_KEY | storage.tenant_server.public_key | Public key of the tenant server |
| STORAGE\_\_TENANT_SERVER\_\_ENDPOINT | storage.tenant_server.endpoint | Endpoint of the tenant server |
| TRACING\_\_ENABLED | tracing.enabled | Enable tracing |
| TRACING\_\_TYPE | tracing.type | Tracing mode - possible values: "opentelemetry", "azure_monitor" (Expects the APPLICATIONINSIGHTS_CONNECTION_STRING environment variable.) |
| TRACING\_\_OPENTELEMETRY\_\_ENDPOINT | tracing.opentelemetry.endpoint | Endpoint to which OpenTelemetry traces are exported |
| TRACING\_\_OPENTELEMETRY\_\_SERVICE_NAME | tracing.opentelemetry.service_name | Name of the service as displayed in the traces collected |
| TRACING\_\_OPENTELEMETRY\_\_EXPORTER | tracing.opentelemetry.exporter | Name of exporter |
| KUBERNETES\_\_POD_NAME | kubernetes.pod_name | Service pod name |
### Expected AMQP input message:
## Setup
**IMPORTANT** you need to set the following environment variables before running the setup script:
- ``$NEXUS_USER`` your Nexus user (usually equal to firstname.lastname@knecon.com)
- ``$NEXUS_PASSWORD`` your Nexus password (usually equal to your Azure Login)
```shell
# create venv and activate it
source ./scripts/setup/devenvsetup.sh {{ cookiecutter.python_version }} $NEXUS_USER $NEXUS_PASSWORD
source .venv/bin/activate
```
### OpenTelemetry
OpenTelemetry (via its Python SDK) is set up to be as unobtrusive as possible; for typical use cases it can be
configured from environment variables, without additional work in the microservice app, although additional
configuration is possible.
`TRACING__OPENTELEMETRY__ENDPOINT` should typically be set
to `http://otel-collector-opentelemetry-collector.otel-collector:4318/v1/traces`.
## Queue Manager
The queue manager is responsible for consuming messages from the input queue, processing them and sending the response
to the output queue. The default callback also downloads data from the storage and uploads the result to the storage.
The response message does not contain the data itself, but the identifiers from the input message (including headers
beginning with "X-").
### Standalone Usage
```python
from pyinfra.queue.manager import QueueManager
from pyinfra.queue.callback import make_download_process_upload_callback, DataProcessor
from pyinfra.config.loader import load_settings
settings = load_settings("path/to/settings")
processing_function: DataProcessor # function should expect a dict (json) or bytes (pdf) as input and should return a json serializable object.
queue_manager = QueueManager(settings)
callback = make_download_process_upload_callback(processing_function, settings)
queue_manager.start_consuming(callback)
```
### Usage in a Service
This is the recommended way to use the module. This includes the webserver, Prometheus metrics and health endpoints.
Custom endpoints can be added by adding a new route to the `app` object beforehand. Settings are loaded from files
or folders passed as positional CLI arguments (e.g. `path/to/settings.toml`). The values can also be set or overridden via
environment variables (e.g. `LOGGING__LEVEL=DEBUG`).
The callback can be replaced with a custom one, for example if the data to process is contained in the message itself
and not on the storage.
```python
from pyinfra.config.loader import load_settings, parse_settings_path
from pyinfra.examples import start_standard_queue_consumer
from pyinfra.queue.callback import make_download_process_upload_callback, DataProcessor
processing_function: DataProcessor
settings_path = parse_settings_path()
settings = load_settings(settings_path)
callback = make_download_process_upload_callback(processing_function, settings)
start_standard_queue_consumer(callback, settings) # optionally also pass a fastAPI app object with preconfigured routes
```
### AMQP input message:
Either use the legacy format with dossierId and fileId as strings or the new format where absolute paths are used.
All headers beginning with "X-" are forwarded to the message processor, and returned in the response message (e.g.
"X-TENANT-ID" is used to acquire storage information for the tenant).
```json
{
"dossierId": "",
"fileId": "",
"targetFileExtension": "",
"responseFileExtension": ""
"targetFilePath": "",
"responseFilePath": ""
}
```
Optionally, the input message can contain a field with the key `"operations"`.
### AMQP output message:
or
```json
{
"dossierId": "",
"fileId": "",
...
"targetFileExtension": "",
"responseFileExtension": ""
}
```
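For illustration, a minimal sketch of publishing such a request message with pika, assuming a local RabbitMQ with the default credentials and an already-declared input queue named `request_queue` (all names and values here are placeholders):

```python
import json

import pika

# Placeholder connection details; in a deployment these come from the settings.
connection = pika.BlockingConnection(
    pika.ConnectionParameters(
        host="localhost",
        port=5672,
        credentials=pika.PlainCredentials("user", "bitnami"),
    )
)
channel = connection.channel()

message = {
    "dossierId": "dossier-123",
    "fileId": "file-456",
    "targetFileExtension": "pdf",
    "responseFileExtension": "json",
}
# Headers beginning with "X-" are forwarded and returned in the response message.
channel.basic_publish(
    exchange="",
    routing_key="request_queue",  # assumed to be declared by the consuming service
    body=json.dumps(message),
    properties=pika.BasicProperties(headers={"X-TENANT-ID": "tenant-a"}),
)
connection.close()
```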
## Development
## Module Installation
Either run `src/serve.py` or the built Docker image.
Add the respective version of the pyinfra package to your pyproject.toml file. Make sure to add our gitlab registry as a
source.
For now, all internal packages used by pyinfra also have to be added to the pyproject.toml file (namely kn-utils).
Execute `poetry lock` and `poetry install` to install the packages.
### Setup
You can look up the latest version of the package in
the [gitlab registry](https://gitlab.knecon.com/knecon/research/pyinfra/-/packages).
For the used versions of internal dependencies, please refer to the [pyproject.toml](pyproject.toml) file.
Install module.
```toml
[tool.poetry.dependencies]
pyinfra = { version = "x.x.x", source = "gitlab-research" }
kn-utils = { version = "x.x.x", source = "gitlab-research" }
```bash
pip install -e .
pip install -r requirements.txt
[[tool.poetry.source]]
name = "gitlab-research"
url = "https://gitlab.knecon.com/api/v4/groups/19/-/packages/pypi/simple"
priority = "explicit"
```
or build docker image.
## Scripts
### Run pyinfra locally
**Shell 1**: Start minio and rabbitmq containers
```bash
docker build -f Dockerfile -t pyinfra .
$ cd tests && docker compose up
```
### Usage
**Shell 1:** Start a MinIO and a RabbitMQ docker container.
**Shell 2**: Start pyinfra with callback mock
```bash
docker-compose up
$ python scripts/start_pyinfra.py
```
**Shell 2:** Add files to the local minio storage.
**Shell 3**: Upload dummy content on storage and publish message
```bash
python scripts/manage_minio.py add <MinIO target folder> -d path/to/a/folder/with/PDFs
$ python scripts/send_request.py
```
**Shell 2:** Run pyinfra-server.
## Tests
```bash
python src/serve.py
```
or as container:
Tests require a running minio and rabbitmq container, meaning you have to run `docker compose up` in the tests folder
before running the tests.
```bash
docker run --net=host pyinfra
```
## OpenTelemetry Protobuf Dependency Hell
**Shell 3:** Run analysis-container.
**Shell 4:** Start a client that sends requests to process PDFs from the MinIO store and annotates these PDFs according to the service responses.
```bash
python scripts/mock_client.py
```
**Note**: Status 2025/01/09: the currently used `opentelemetry-exporter-otlp-proto-http` version `1.25.0` requires
a `protobuf` version < `5.x.x` and is not compatible with the latest protobuf version `5.27.x`. This is an [open issue](https://github.com/open-telemetry/opentelemetry-python/issues/3958) in opentelemetry, because [support for 4.25.x ends in Q2 '25](https://protobuf.dev/support/version-support/#python).
Therefore, we should keep this in mind and update the dependency once opentelemetry includes support for `protobuf 5.27.x`.


@ -1,40 +0,0 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.atlassian.bamboo</groupId>
<artifactId>bamboo-specs-parent</artifactId>
<version>7.1.2</version>
<relativePath/>
</parent>
<artifactId>bamboo-specs</artifactId>
<version>1.0.0-SNAPSHOT</version>
<packaging>jar</packaging>
<properties>
<sonar.skip>true</sonar.skip>
</properties>
<dependencies>
<dependency>
<groupId>com.atlassian.bamboo</groupId>
<artifactId>bamboo-specs-api</artifactId>
</dependency>
<dependency>
<groupId>com.atlassian.bamboo</groupId>
<artifactId>bamboo-specs</artifactId>
</dependency>
<!-- Test dependencies -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<!-- run 'mvn test' to perform offline validation of the plan -->
<!-- run 'mvn -Ppublish-specs' to upload the plan to your Bamboo server -->
</project>


@ -1,179 +0,0 @@
package buildjob;
import com.atlassian.bamboo.specs.api.BambooSpec;
import com.atlassian.bamboo.specs.api.builders.BambooKey;
import com.atlassian.bamboo.specs.api.builders.docker.DockerConfiguration;
import com.atlassian.bamboo.specs.api.builders.permission.PermissionType;
import com.atlassian.bamboo.specs.api.builders.permission.Permissions;
import com.atlassian.bamboo.specs.api.builders.permission.PlanPermissions;
import com.atlassian.bamboo.specs.api.builders.plan.Job;
import com.atlassian.bamboo.specs.api.builders.plan.Plan;
import com.atlassian.bamboo.specs.api.builders.plan.PlanIdentifier;
import com.atlassian.bamboo.specs.api.builders.plan.Stage;
import com.atlassian.bamboo.specs.api.builders.plan.branches.BranchCleanup;
import com.atlassian.bamboo.specs.api.builders.plan.branches.PlanBranchManagement;
import com.atlassian.bamboo.specs.api.builders.project.Project;
import com.atlassian.bamboo.specs.builders.task.CheckoutItem;
import com.atlassian.bamboo.specs.builders.task.InjectVariablesTask;
import com.atlassian.bamboo.specs.builders.task.ScriptTask;
import com.atlassian.bamboo.specs.builders.task.VcsCheckoutTask;
import com.atlassian.bamboo.specs.builders.task.CleanWorkingDirectoryTask;
import com.atlassian.bamboo.specs.builders.task.VcsTagTask;
import com.atlassian.bamboo.specs.builders.trigger.BitbucketServerTrigger;
import com.atlassian.bamboo.specs.model.task.InjectVariablesScope;
import com.atlassian.bamboo.specs.api.builders.Variable;
import com.atlassian.bamboo.specs.util.BambooServer;
import com.atlassian.bamboo.specs.builders.task.ScriptTask;
import com.atlassian.bamboo.specs.model.task.ScriptTaskProperties.Location;
/**
* Plan configuration for Bamboo.
* Learn more on: <a href="https://confluence.atlassian.com/display/BAMBOO/Bamboo+Specs">https://confluence.atlassian.com/display/BAMBOO/Bamboo+Specs</a>
*/
@BambooSpec
public class PlanSpec {
private static final String SERVICE_NAME = "pyinfra";
private static final String SERVICE_KEY = SERVICE_NAME.toUpperCase().replaceAll("-","");
/**
* Run main to publish plan on Bamboo
*/
public static void main(final String[] args) throws Exception {
//By default credentials are read from the '.credentials' file.
BambooServer bambooServer = new BambooServer("http://localhost:8085");
Plan plan = new PlanSpec().createDockerBuildPlan();
bambooServer.publish(plan);
PlanPermissions planPermission = new PlanSpec().createPlanPermission(plan.getIdentifier());
bambooServer.publish(planPermission);
}
private PlanPermissions createPlanPermission(PlanIdentifier planIdentifier) {
Permissions permission = new Permissions()
.userPermissions("atlbamboo", PermissionType.EDIT, PermissionType.VIEW, PermissionType.ADMIN, PermissionType.CLONE, PermissionType.BUILD)
.groupPermissions("research", PermissionType.EDIT, PermissionType.VIEW, PermissionType.CLONE, PermissionType.BUILD)
.groupPermissions("Development", PermissionType.EDIT, PermissionType.VIEW, PermissionType.CLONE, PermissionType.BUILD)
.groupPermissions("QA", PermissionType.EDIT, PermissionType.VIEW, PermissionType.CLONE, PermissionType.BUILD)
.loggedInUserPermissions(PermissionType.VIEW)
.anonymousUserPermissionView();
return new PlanPermissions(planIdentifier.getProjectKey(), planIdentifier.getPlanKey()).permissions(permission);
}
private Project project() {
return new Project()
.name("RED")
.key(new BambooKey("RED"));
}
public Plan createDockerBuildPlan() {
return new Plan(
project(),
SERVICE_NAME, new BambooKey(SERVICE_KEY))
.description("Docker build for pyinfra")
.stages(
new Stage("Build Stage")
.jobs(
new Job("Build Job", new BambooKey("BUILD"))
.tasks(
new CleanWorkingDirectoryTask()
.description("Clean working directory.")
.enabled(true),
new VcsCheckoutTask()
.description("Checkout default repository.")
.checkoutItems(new CheckoutItem().defaultRepository()),
new ScriptTask()
.description("Set config and keys.")
.inlineBody("mkdir -p ~/.ssh\n" +
"echo \"${bamboo.bamboo_agent_ssh}\" | base64 -d >> ~/.ssh/id_rsa\n" +
"echo \"host vector.iqser.com\" > ~/.ssh/config\n" +
"echo \" user bamboo-agent\" >> ~/.ssh/config\n" +
"chmod 600 ~/.ssh/config ~/.ssh/id_rsa"),
new ScriptTask()
.description("Build Docker container.")
.location(Location.FILE)
.fileFromPath("bamboo-specs/src/main/resources/scripts/docker-build.sh")
.argument(SERVICE_NAME))
.dockerConfiguration(
new DockerConfiguration()
.image("nexus.iqser.com:5001/infra/release_build:4.2.0")
.volume("/var/run/docker.sock", "/var/run/docker.sock"))),
new Stage("Sonar Stage")
.jobs(
new Job("Sonar Job", new BambooKey("SONAR"))
.tasks(
new CleanWorkingDirectoryTask()
.description("Clean working directory.")
.enabled(true),
new VcsCheckoutTask()
.description("Checkout default repository.")
.checkoutItems(new CheckoutItem().defaultRepository()),
new ScriptTask()
.description("Set config and keys.")
.inlineBody("mkdir -p ~/.ssh\n" +
"echo \"${bamboo.bamboo_agent_ssh}\" | base64 -d >> ~/.ssh/id_rsa\n" +
"echo \"host vector.iqser.com\" > ~/.ssh/config\n" +
"echo \" user bamboo-agent\" >> ~/.ssh/config\n" +
"chmod 600 ~/.ssh/config ~/.ssh/id_rsa"),
new ScriptTask()
.description("Run Sonarqube scan.")
.location(Location.FILE)
.fileFromPath("bamboo-specs/src/main/resources/scripts/sonar-scan.sh")
.argument(SERVICE_NAME),
new ScriptTask()
.description("Shut down any running docker containers.")
.location(Location.FILE)
.inlineBody("pip install docker-compose\n" +
"docker-compose down"))
.dockerConfiguration(
new DockerConfiguration()
.image("nexus.iqser.com:5001/infra/release_build:4.2.0")
.volume("/var/run/docker.sock", "/var/run/docker.sock"))),
new Stage("Licence Stage")
.jobs(
new Job("Git Tag Job", new BambooKey("GITTAG"))
.tasks(
new VcsCheckoutTask()
.description("Checkout default repository.")
.checkoutItems(new CheckoutItem().defaultRepository()),
new ScriptTask()
.description("Build git tag.")
.location(Location.FILE)
.fileFromPath("bamboo-specs/src/main/resources/scripts/git-tag.sh"),
new InjectVariablesTask()
.description("Inject git tag.")
.path("git.tag")
.namespace("g")
.scope(InjectVariablesScope.LOCAL),
new VcsTagTask()
.description("${bamboo.g.gitTag}")
.tagName("${bamboo.g.gitTag}")
.defaultRepository())
.dockerConfiguration(
new DockerConfiguration()
.image("nexus.iqser.com:5001/infra/release_build:4.4.1")),
new Job("Licence Job", new BambooKey("LICENCE"))
.enabled(false)
.tasks(
new VcsCheckoutTask()
.description("Checkout default repository.")
.checkoutItems(new CheckoutItem().defaultRepository()),
new ScriptTask()
.description("Build licence.")
.location(Location.FILE)
.fileFromPath("bamboo-specs/src/main/resources/scripts/create-licence.sh"))
.dockerConfiguration(
new DockerConfiguration()
.image("nexus.iqser.com:5001/infra/maven:3.6.2-jdk-13-3.0.0")
.volume("/etc/maven/settings.xml", "/usr/share/maven/ref/settings.xml")
.volume("/var/run/docker.sock", "/var/run/docker.sock"))))
.linkedRepositories("RR / " + SERVICE_NAME)
.triggers(new BitbucketServerTrigger())
.planBranchManagement(new PlanBranchManagement()
.createForVcsBranch()
.delete(new BranchCleanup()
.whenInactiveInRepositoryAfterDays(14))
.notificationForCommitters());
}
}


@ -1,19 +0,0 @@
#!/bin/bash
set -e
if [[ \"${bamboo_version_tag}\" != \"dev\" ]]
then
${bamboo_capability_system_builder_mvn3_Maven_3}/bin/mvn \
-f ${bamboo_build_working_directory}/pom.xml \
versions:set \
-DnewVersion=${bamboo_version_tag}
${bamboo_capability_system_builder_mvn3_Maven_3}/bin/mvn \
-f ${bamboo_build_working_directory}/pom.xml \
-B clean deploy \
-e -DdeployAtEnd=true \
-Dmaven.wagon.http.ssl.insecure=true \
-Dmaven.wagon.http.ssl.allowall=true \
-Dmaven.wagon.http.ssl.ignore.validity.dates=true \
-DaltDeploymentRepository=iqser_release::default::https://nexus.iqser.com/repository/gin4-platform-releases
fi


@ -1,13 +0,0 @@
#!/bin/bash
set -e
SERVICE_NAME=$1
python3 -m venv build_venv
source build_venv/bin/activate
python3 -m pip install --upgrade pip
echo "index-url = https://${bamboo_nexus_user}:${bamboo_nexus_password}@nexus.iqser.com/repository/python-combind/simple" >> pip.conf
docker build -f Dockerfile -t nexus.iqser.com:5001/red/$SERVICE_NAME:${bamboo_version_tag} .
echo "${bamboo_nexus_password}" | docker login --username "${bamboo_nexus_user}" --password-stdin nexus.iqser.com:5001
docker push nexus.iqser.com:5001/red/$SERVICE_NAME:${bamboo_version_tag}


@ -1,9 +0,0 @@
#!/bin/bash
set -e
if [[ "${bamboo_version_tag}" = "dev" ]]
then
echo "gitTag=${bamboo_planRepository_1_branch}_${bamboo_buildNumber}" > git.tag
else
echo "gitTag=${bamboo_version_tag}" > git.tag
fi


@ -1,58 +0,0 @@
#!/bin/bash
set -e
export JAVA_HOME=/usr/bin/sonar-scanner/jre
python3 -m venv build_venv
source build_venv/bin/activate
python3 -m pip install --upgrade pip
python3 -m pip install dependency-check
python3 -m pip install docker-compose
python3 -m pip install coverage
echo "docker-compose down"
docker-compose down
sleep 30
echo "coverage report generation"
bash run_tests.sh
if [ ! -f reports/coverage.xml ]
then
exit 1
fi
SERVICE_NAME=$1
echo "dependency-check:aggregate"
mkdir -p reports
dependency-check --enableExperimental -f JSON -f XML \
--disableAssembly -s . -o reports --project $SERVICE_NAME --exclude ".git/**" --exclude "venv/**" \
--exclude "build_venv/**" --exclude "**/__pycache__/**" --exclude "bamboo-specs/**"
if [[ -z "${bamboo_repository_pr_key}" ]]
then
echo "Sonar Scan for branch: ${bamboo_planRepository_1_branch}"
/usr/bin/sonar-scanner/bin/sonar-scanner -X\
-Dsonar.projectKey=RED_$SERVICE_NAME \
-Dsonar.host.url=https://sonarqube.iqser.com \
-Dsonar.login=${bamboo_sonarqube_api_token_secret} \
-Dsonar.dependencyCheck.jsonReportPath=reports/dependency-check-report.json \
-Dsonar.dependencyCheck.xmlReportPath=reports/dependency-check-report.xml \
-Dsonar.dependencyCheck.htmlReportPath=reports/dependency-check-report.html \
-Dsonar.python.coverage.reportPaths=reports/coverage.xml
else
echo "Sonar Scan for PR with key1: ${bamboo_repository_pr_key}"
/usr/bin/sonar-scanner/bin/sonar-scanner \
-Dsonar.projectKey=RED_$SERVICE_NAME \
-Dsonar.host.url=https://sonarqube.iqser.com \
-Dsonar.login=${bamboo_sonarqube_api_token_secret} \
-Dsonar.pullrequest.key=${bamboo_repository_pr_key} \
-Dsonar.pullrequest.branch=${bamboo_repository_pr_sourceBranch} \
-Dsonar.pullrequest.base=${bamboo_repository_pr_targetBranch} \
-Dsonar.dependencyCheck.jsonReportPath=reports/dependency-check-report.json \
-Dsonar.dependencyCheck.xmlReportPath=reports/dependency-check-report.xml \
-Dsonar.dependencyCheck.htmlReportPath=reports/dependency-check-report.html \
-Dsonar.python.coverage.reportPaths=reports/coverage.xml
fi


@ -1,16 +0,0 @@
package buildjob;
import com.atlassian.bamboo.specs.api.builders.plan.Plan;
import com.atlassian.bamboo.specs.api.exceptions.PropertiesValidationException;
import com.atlassian.bamboo.specs.api.util.EntityPropertiesBuilders;
import org.junit.Test;
public class PlanSpecTest {
@Test
public void checkYourPlanOffline() throws PropertiesValidationException {
Plan plan = new PlanSpec().createDockerBuildPlan();
EntityPropertiesBuilders.build(plan);
}
}


@ -1,6 +0,0 @@
___ _ _ ___ __
o O O | _ \ | || | |_ _| _ _ / _| _ _ __ _
o | _/ \_, | | | | ' \ | _| | '_| / _` |
TS__[O] _|_|_ _|__/ |___| |_||_| _|_|_ _|_|_ \__,_|
{======|_| ``` |_| ````|_|`````|_|`````|_|`````|_|`````|_|`````|
./o--000' `-0-0-' `-0-0-' `-0-0-' `-0-0-' `-0-0-' `-0-0-' `-0-0-'

27459
bom.json Normal file

File diff suppressed because it is too large


@ -1,35 +0,0 @@
service:
logging_level: $LOGGING_LEVEL_ROOT|DEBUG # Logging level for service logger
probing_webserver:
host: $PROBING_WEBSERVER_HOST|"0.0.0.0" # Probe webserver address
port: $PROBING_WEBSERVER_PORT|8080 # Probe webserver port
mode: $PROBING_WEBSERVER_MODE|production # webserver mode: {development, production}
rabbitmq:
host: $RABBITMQ_HOST|localhost # RabbitMQ host address
port: $RABBITMQ_PORT|5672 # RabbitMQ host port
user: $RABBITMQ_USERNAME|user # RabbitMQ username
password: $RABBITMQ_PASSWORD|bitnami # RabbitMQ password
heartbeat: $RABBITMQ_HEARTBEAT|7200 # Controls AMQP heartbeat timeout in seconds
queues:
input: $REQUEST_QUEUE|request_queue # Requests to service
output: $RESPONSE_QUEUE|response_queue # Responses by service
dead_letter: $DEAD_LETTER_QUEUE|dead_letter_queue # Messages that failed to process
callback:
analysis_endpoint: $ANALYSIS_ENDPOINT|"http://127.0.0.1:5000"
storage:
backend: $STORAGE_BACKEND|s3 # The type of storage to use {s3, azure}
bucket: "STORAGE_BUCKET_NAME|STORAGE_AZURECONTAINERNAME|pyinfra-test-bucket" # The bucket / container to pull files specified in queue requests from
s3:
endpoint: $STORAGE_ENDPOINT|"http://127.0.0.1:9000"
access_key: $STORAGE_KEY|root
secret_key: $STORAGE_SECRET|password
region: $STORAGE_REGION|"eu-west-1"
azure:
connection_string: $STORAGE_AZURECONNECTIONSTRING|"DefaultEndpointsProtocol=https;AccountName=iqserdevelopment;AccountKey=4imAbV9PYXaztSOMpIyAClg88bAZCXuXMGJG0GA1eIBpdh2PlnFGoRBnKqLy2YZUSTmZ3wJfC7tzfHtuC6FEhQ==;EndpointSuffix=core.windows.net"


@ -1,32 +0,0 @@
version: '2'
services:
minio:
image: minio/minio
ports:
- "9000:9000"
environment:
- MINIO_ROOT_PASSWORD=password
- MINIO_ROOT_USER=root
volumes:
- ./data/minio_store:/data
command: server /data
network_mode: "bridge"
rabbitmq:
image: docker.io/bitnami/rabbitmq:3.9
ports:
- '4369:4369'
- '5551:5551'
- '5552:5552'
- '5672:5672'
- '25672:25672'
- '15672:15672'
environment:
- RABBITMQ_SECURE_PASSWORD=yes
- RABBITMQ_VM_MEMORY_HIGH_WATERMARK=100%
- RABBITMQ_DISK_FREE_ABSOLUTE_LIMIT=20Gi
network_mode: "bridge"
volumes:
- /opt/bitnami/rabbitmq/.rabbitmq/:/data/bitnami
volumes:
mdata:

6802
poetry.lock generated Normal file

File diff suppressed because it is too large


@ -0,0 +1 @@


@ -1,55 +0,0 @@
"""Implements a config object with dot-indexing syntax."""
import os
from itertools import chain
from operator import truth
from envyaml import EnvYAML
from funcy import first, juxt, butlast, last
from pyinfra.locations import CONFIG_FILE
def _get_item_and_maybe_make_dotindexable(container, item):
ret = container[item]
return DotIndexable(ret) if isinstance(ret, dict) else ret
class DotIndexable:
def __init__(self, x):
self.x = x
def __getattr__(self, item):
return _get_item_and_maybe_make_dotindexable(self.x, item)
def __repr__(self):
return self.x.__repr__()
def __getitem__(self, item):
return self.__getattr__(item)
class Config:
def __init__(self, config_path):
self.__config = EnvYAML(config_path)
def __getattr__(self, item):
if item in self.__config:
return _get_item_and_maybe_make_dotindexable(self.__config, item)
def __getitem__(self, item):
return self.__getattr__(item)
CONFIG = Config(CONFIG_FILE)
def parse_disjunction_string(disjunction_string):
def try_parse_env_var(disjunction_string):
try:
return os.environ[disjunction_string]
except KeyError:
return None
options = disjunction_string.split("|")
identifiers, fallback_value = juxt(butlast, last)(options)
return first(chain(filter(truth, map(try_parse_env_var, identifiers)), [fallback_value]))

133
pyinfra/config/loader.py Normal file

@ -0,0 +1,133 @@
import argparse
import os
from functools import partial
from pathlib import Path
from typing import Union
from dynaconf import Dynaconf, ValidationError, Validator
from funcy import lflatten
from kn_utils.logging import logger
# This path is meant for testing purposes and convenience. It probably won't reflect the actual root path when pyinfra is
# installed as a package, so don't use it in production code, but define your own root path as described in load config.
local_pyinfra_root_path = Path(__file__).parents[2]
def load_settings(
settings_path: Union[str, Path, list] = "config/",
root_path: Union[str, Path] = None,
validators: list[Validator] = None,
):
"""Load settings from .toml files, .env and environment variables. Also ensures a ROOT_PATH environment variable is
set. If ROOT_PATH is not set and no root_path argument is passed, the current working directory is used as root.
Settings paths can be a single .toml file, a folder containing .toml files or a list of .toml files and folders.
If a ROOT_PATH environment variable is set, it is not overwritten by the root_path argument.
If a folder is passed, all .toml files in the folder are loaded. If settings path is None, only .env and
environment variables are loaded. If settings_path contains relative paths, they are joined with the root_path argument.
"""
root_path = get_or_set_root_path(root_path)
validators = validators or get_pyinfra_validators()
settings_files = normalize_to_settings_files(settings_path, root_path)
settings = Dynaconf(
load_dotenv=True,
envvar_prefix=False,
settings_files=settings_files,
)
validate_settings(settings, validators)
logger.info("Settings loaded and validated.")
return settings
def normalize_to_settings_files(settings_path: Union[str, Path, list], root_path: Union[str, Path]):
if settings_path is None:
logger.info("No settings path specified, only loading .env end ENVs.")
settings_files = []
elif isinstance(settings_path, str) or isinstance(settings_path, Path):
settings_files = [settings_path]
elif isinstance(settings_path, list):
settings_files = settings_path
else:
raise ValueError(f"Invalid settings path: {settings_path=}")
settings_files = lflatten(map(partial(_normalize_and_verify, root_path=root_path), settings_files))
logger.debug(f"Normalized settings files: {settings_files}")
return settings_files
def _normalize_and_verify(settings_path: Path, root_path: Path):
settings_path = Path(settings_path)
root_path = Path(root_path)
if not settings_path.is_absolute():
logger.debug(f"Settings path is not absolute, joining with root path: {root_path}")
settings_path = root_path / settings_path
if settings_path.is_dir():
logger.debug(f"Settings path is a directory, loading all .toml files in the directory: {settings_path}")
settings_files = list(settings_path.glob("*.toml"))
elif settings_path.is_file():
logger.debug(f"Settings path is a file, loading specified file: {settings_path}")
settings_files = [settings_path]
else:
raise ValueError(f"Invalid settings path: {settings_path=}, {root_path=}")
return settings_files
def get_or_set_root_path(root_path: Union[str, Path] = None):
env_root_path = os.environ.get("ROOT_PATH")
if env_root_path:
root_path = env_root_path
logger.debug(f"'ROOT_PATH' environment variable is set to {root_path}.")
elif root_path:
logger.info(f"'ROOT_PATH' environment variable is not set, setting to {root_path}.")
os.environ["ROOT_PATH"] = str(root_path)
else:
root_path = Path.cwd()
logger.info(f"'ROOT_PATH' environment variable is not set, defaulting to working directory {root_path}.")
os.environ["ROOT_PATH"] = str(root_path)
return root_path
def get_pyinfra_validators():
import pyinfra.config.validators
return lflatten(
validator for validator in pyinfra.config.validators.__dict__.values() if isinstance(validator, list)
)
def validate_settings(settings: Dynaconf, validators):
settings_valid = True
for validator in validators:
try:
validator.validate(settings)
except ValidationError as e:
settings_valid = False
logger.warning(e)
if not settings_valid:
raise ValidationError("Settings validation failed.")
logger.debug("Settings validated.")
def parse_settings_path():
parser = argparse.ArgumentParser()
parser.add_argument(
"settings_path",
help="Path to settings file(s) or folder(s). Must be .toml file(s) or a folder(s) containing .toml files.",
nargs="+",
)
return parser.parse_args().settings_path
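A brief usage sketch for the loader above (the service script name and CLI invocation are illustrative): settings paths are passed as positional arguments, e.g. `LOGGING__LEVEL=DEBUG python my_service.py config/ extra_settings.toml`, and consumed like this:

```python
from pyinfra.config.loader import load_settings, parse_settings_path

# parse_settings_path() returns the positional CLI arguments as a list of paths;
# load_settings() accepts a single path or a list and resolves relative paths
# against ROOT_PATH (defaulting to the working directory).
# With no explicit validators, the full pyinfra validator set is applied, so the
# settings files must be complete.
settings_path = parse_settings_path()
settings = load_settings(settings_path)
```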


@ -0,0 +1,57 @@
from dynaconf import Validator
queue_manager_validators = [
Validator("rabbitmq.host", must_exist=True, is_type_of=str),
Validator("rabbitmq.port", must_exist=True, is_type_of=int),
Validator("rabbitmq.username", must_exist=True, is_type_of=str),
Validator("rabbitmq.password", must_exist=True, is_type_of=str),
Validator("rabbitmq.heartbeat", must_exist=True, is_type_of=int),
Validator("rabbitmq.connection_sleep", must_exist=True, is_type_of=int),
Validator("rabbitmq.input_queue", must_exist=True, is_type_of=str),
Validator("rabbitmq.output_queue", must_exist=True, is_type_of=str),
Validator("rabbitmq.dead_letter_queue", must_exist=True, is_type_of=str),
]
azure_storage_validators = [
Validator("storage.azure.connection_string", must_exist=True, is_type_of=str),
Validator("storage.azure.container", must_exist=True, is_type_of=str),
]
s3_storage_validators = [
Validator("storage.s3.endpoint", must_exist=True, is_type_of=str),
Validator("storage.s3.key", must_exist=True, is_type_of=str),
Validator("storage.s3.secret", must_exist=True, is_type_of=str),
Validator("storage.s3.region", must_exist=True, is_type_of=str),
Validator("storage.s3.bucket", must_exist=True, is_type_of=str),
]
storage_validators = [
Validator("storage.backend", must_exist=True, is_type_of=str),
]
multi_tenant_storage_validators = [
Validator("storage.tenant_server.endpoint", must_exist=True, is_type_of=str),
Validator("storage.tenant_server.public_key", must_exist=True, is_type_of=str),
]
prometheus_validators = [
Validator("metrics.prometheus.prefix", must_exist=True, is_type_of=str),
Validator("metrics.prometheus.enabled", must_exist=True, is_type_of=bool),
]
webserver_validators = [
Validator("webserver.host", must_exist=True, is_type_of=str),
Validator("webserver.port", must_exist=True, is_type_of=int),
]
tracing_validators = [
Validator("tracing.enabled", must_exist=True, is_type_of=bool),
Validator("tracing.type", must_exist=True, is_type_of=str)
]
opentelemetry_validators = [
Validator("tracing.opentelemetry.endpoint", must_exist=True, is_type_of=str),
Validator("tracing.opentelemetry.service_name", must_exist=True, is_type_of=str),
Validator("tracing.opentelemetry.exporter", must_exist=True, is_type_of=str)
]

169
pyinfra/examples.py Normal file
@ -0,0 +1,169 @@
import asyncio
import signal
import sys
import aiohttp
from aiormq.exceptions import AMQPConnectionError
from dynaconf import Dynaconf
from fastapi import FastAPI
from kn_utils.logging import logger
from pyinfra.config.loader import get_pyinfra_validators, validate_settings
from pyinfra.queue.async_manager import AsyncQueueManager, RabbitMQConfig
from pyinfra.queue.callback import Callback
from pyinfra.queue.manager import QueueManager
from pyinfra.utils.opentelemetry import instrument_app, instrument_pika, setup_trace
from pyinfra.webserver.prometheus import (
add_prometheus_endpoint,
make_prometheus_processing_time_decorator_from_settings,
)
from pyinfra.webserver.utils import (
add_health_check_endpoint,
create_webserver_thread_from_settings,
run_async_webserver,
)
shutdown_flag = False
async def graceful_shutdown(manager: AsyncQueueManager, queue_task, webserver_task):
global shutdown_flag
shutdown_flag = True
logger.info("SIGTERM received, shutting down gracefully...")
if queue_task and not queue_task.done():
queue_task.cancel()
# await queue manager shutdown
await asyncio.gather(queue_task, manager.shutdown(), return_exceptions=True)
if webserver_task and not webserver_task.done():
webserver_task.cancel()
# await webserver shutdown
await asyncio.gather(webserver_task, return_exceptions=True)
logger.info("Shutdown complete.")
async def run_async_queues(manager: AsyncQueueManager, app, port, host):
"""Run the async webserver and the async queue manager concurrently."""
queue_task = None
webserver_task = None
tenant_api_available = True
# add signal handler for SIGTERM and SIGINT
loop = asyncio.get_running_loop()
loop.add_signal_handler(
signal.SIGTERM, lambda: asyncio.create_task(graceful_shutdown(manager, queue_task, webserver_task))
)
loop.add_signal_handler(
signal.SIGINT, lambda: asyncio.create_task(graceful_shutdown(manager, queue_task, webserver_task))
)
try:
active_tenants = await manager.fetch_active_tenants()
queue_task = asyncio.create_task(manager.run(active_tenants=active_tenants), name="queues")
webserver_task = asyncio.create_task(run_async_webserver(app, port, host), name="webserver")
await asyncio.gather(queue_task, webserver_task)
except asyncio.CancelledError:
logger.info("Main task was cancelled, initiating shutdown.")
except AMQPConnectionError as e:
logger.warning(f"AMQPConnectionError: {e} - shutting down.")
except (aiohttp.ClientResponseError, aiohttp.ClientConnectorError):
logger.warning("Tenant server did not answer - shutting down.")
tenant_api_available = False
except Exception as e:
logger.error(f"An error occurred while running async queues: {e}", exc_info=True)
sys.exit(1)
finally:
if shutdown_flag:
logger.debug("Graceful shutdown already in progress.")
else:
logger.warning("Initiating shutdown due to error or manual interruption.")
if not tenant_api_available:
sys.exit(0)
if queue_task and not queue_task.done():
queue_task.cancel()
if webserver_task and not webserver_task.done():
webserver_task.cancel()
await asyncio.gather(queue_task, manager.shutdown(), webserver_task, return_exceptions=True)
logger.info("Shutdown complete.")
def start_standard_queue_consumer(
callback: Callback,
settings: Dynaconf,
app: FastAPI = None,
):
"""Default serving logic for research services.
Supplies /health, /ready and /prometheus endpoints (if enabled). The callback is monitored for processing time per
message. Queue messages are also traced via OpenTelemetry (if enabled).
Workload is received via queue messages and processed by the callback function (see pyinfra.queue.callback for
callbacks).
"""
validate_settings(settings, get_pyinfra_validators())
logger.info("Starting webserver and queue consumer...")
app = app or FastAPI()
if settings.metrics.prometheus.enabled:
logger.info("Prometheus metrics enabled.")
app = add_prometheus_endpoint(app)
callback = make_prometheus_processing_time_decorator_from_settings(settings)(callback)
if settings.tracing.enabled:
setup_trace(settings)
instrument_pika(dynamic_queues=settings.dynamic_tenant_queues.enabled)
instrument_app(app)
if settings.dynamic_tenant_queues.enabled:
logger.info("Dynamic tenant queues enabled. Running async queues.")
config = RabbitMQConfig(
host=settings.rabbitmq.host,
port=settings.rabbitmq.port,
username=settings.rabbitmq.username,
password=settings.rabbitmq.password,
heartbeat=settings.rabbitmq.heartbeat,
input_queue_prefix=settings.rabbitmq.service_request_queue_prefix,
tenant_event_queue_suffix=settings.rabbitmq.tenant_event_queue_suffix,
tenant_exchange_name=settings.rabbitmq.tenant_exchange_name,
service_request_exchange_name=settings.rabbitmq.service_request_exchange_name,
service_response_exchange_name=settings.rabbitmq.service_response_exchange_name,
service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name,
queue_expiration_time=settings.rabbitmq.queue_expiration_time,
pod_name=settings.kubernetes.pod_name,
)
manager = AsyncQueueManager(
config=config,
tenant_service_url=settings.storage.tenant_server.endpoint,
message_processor=callback,
max_concurrent_tasks=(
settings.asyncio.max_concurrent_tasks if hasattr(settings.asyncio, "max_concurrent_tasks") else 10
),
)
else:
logger.info("Dynamic tenant queues disabled. Running sync queues.")
manager = QueueManager(settings)
app = add_health_check_endpoint(app, manager.is_ready)
if isinstance(manager, AsyncQueueManager):
asyncio.run(run_async_queues(manager, app, port=settings.webserver.port, host=settings.webserver.host))
elif isinstance(manager, QueueManager):
webserver = create_webserver_thread_from_settings(app, settings)
webserver.start()
try:
manager.start_consuming(callback)
except Exception as e:
logger.error(f"An error occurred while consuming messages: {e}", exc_info=True)
sys.exit(1)
else:
logger.warning(f"Behavior for type {type(manager)} is not defined")

@ -1,34 +0,0 @@
class AnalysisFailure(Exception):
pass
class DataLoadingFailure(Exception):
pass
class ProcessingFailure(Exception):
pass
class UnknownStorageBackend(ValueError):
pass
class InvalidEndpoint(ValueError):
pass
class UnknownClient(ValueError):
pass
class ConsumerError(Exception):
pass
class NoSuchContainer(KeyError):
pass
class IntentionalTestException(RuntimeError):
pass

@ -1,64 +0,0 @@
import logging
import requests
from flask import Flask, jsonify
from waitress import serve
from pyinfra.config import CONFIG
logger = logging.getLogger(__file__)
logger.setLevel(CONFIG.service.logging_level)
def run_probing_webserver(app, host=None, port=None, mode=None):
if not host:
host = CONFIG.probing_webserver.host
if not port:
port = CONFIG.probing_webserver.port
if not mode:
mode = CONFIG.probing_webserver.mode
if mode == "development":
app.run(host=host, port=port, debug=True)
elif mode == "production":
serve(app, host=host, port=port)
def set_up_probing_webserver():
# TODO: implement meaningful checks
app = Flask(__name__)
informed_about_missing_prometheus_endpoint = False
@app.route("/ready", methods=["GET"])
def ready():
resp = jsonify("OK")
resp.status_code = 200
return resp
@app.route("/health", methods=["GET"])
def healthy():
resp = jsonify("OK")
resp.status_code = 200
return resp
@app.route("/prometheus", methods=["GET"])
def get_metrics_from_analysis_endpoint():
nonlocal informed_about_missing_prometheus_endpoint
try:
resp = requests.get(f"{CONFIG.rabbitmq.callback.analysis_endpoint}/prometheus")
resp.raise_for_status()
except ConnectionError:
return ""
except requests.exceptions.HTTPError as err:
if resp.status_code == 404:
if not informed_about_missing_prometheus_endpoint:
logger.warning(f"Got no metrics from analysis prometheus endpoint: {err}")
informed_about_missing_prometheus_endpoint = True
else:
logging.warning(f"Caught {err}")
return resp.text
return app

@ -1,18 +0,0 @@
"""Defines constant paths relative to the module root path."""
from pathlib import Path
MODULE_DIR = Path(__file__).resolve().parents[0]
PACKAGE_ROOT_DIR = MODULE_DIR.parents[0]
TEST_DIR = PACKAGE_ROOT_DIR / "test"
CONFIG_FILE = PACKAGE_ROOT_DIR / "config.yaml"
TEST_CONFIG_FILE = TEST_DIR / "config.yaml"
COMPOSE_PATH = PACKAGE_ROOT_DIR
BANNER_FILE = PACKAGE_ROOT_DIR / "banner.txt"

@ -0,0 +1,329 @@
import asyncio
import concurrent.futures
import json
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, Set
import aiohttp
from aio_pika import ExchangeType, IncomingMessage, Message, connect
from aio_pika.abc import (
AbstractChannel,
AbstractConnection,
AbstractExchange,
AbstractIncomingMessage,
AbstractQueue,
)
from aio_pika.exceptions import (
ChannelClosed,
ChannelInvalidStateError,
ConnectionClosed,
)
from aiormq.exceptions import AMQPConnectionError
from kn_utils.logging import logger
from kn_utils.retry import retry
@dataclass
class RabbitMQConfig:
host: str
port: int
username: str
password: str
heartbeat: int
input_queue_prefix: str
tenant_event_queue_suffix: str
tenant_exchange_name: str
service_request_exchange_name: str
service_response_exchange_name: str
service_dead_letter_queue_name: str
queue_expiration_time: int
pod_name: str
connection_params: Dict[str, object] = field(init=False)
def __post_init__(self):
self.connection_params = {
"host": self.host,
"port": self.port,
"login": self.username,
"password": self.password,
"client_properties": {"heartbeat": self.heartbeat},
}
class AsyncQueueManager:
def __init__(
self,
config: RabbitMQConfig,
tenant_service_url: str,
message_processor: Callable[[Dict[str, Any]], Dict[str, Any]],
max_concurrent_tasks: int = 10,
):
self.config = config
self.tenant_service_url = tenant_service_url
self.message_processor = message_processor
self.semaphore = asyncio.Semaphore(max_concurrent_tasks)
self.connection: AbstractConnection | None = None
self.channel: AbstractChannel | None = None
self.tenant_exchange: AbstractExchange | None = None
self.input_exchange: AbstractExchange | None = None
self.output_exchange: AbstractExchange | None = None
self.tenant_exchange_queue: AbstractQueue | None = None
self.tenant_queues: Dict[str, AbstractQueue] = {}
self.consumer_tags: Dict[str, str] = {}
self.message_count: int = 0
@retry(tries=5, exceptions=AMQPConnectionError, reraise=True, logger=logger)
async def connect(self) -> None:
logger.info("Attempting to connect to RabbitMQ...")
self.connection = await connect(**self.config.connection_params)
self.connection.close_callbacks.add(self.on_connection_close)
self.channel = await self.connection.channel()
await self.channel.set_qos(prefetch_count=1)
logger.info("Successfully connected to RabbitMQ")
async def on_connection_close(self, sender, exc):
"""This is a callback for unexpected connection closures."""
logger.debug(f"Sender: {sender}")
if isinstance(exc, ConnectionClosed):
logger.warning("Connection to RabbitMQ lost. Attempting to reconnect...")
try:
active_tenants = await self.fetch_active_tenants()
await self.run(active_tenants=active_tenants)
logger.debug("Reconnected to RabbitMQ successfully")
except Exception as e:
logger.warning(f"Failed to reconnect to RabbitMQ: {e}")
# cancel queue manager and webserver to shutdown service
tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
for task in tasks:
    if task.get_name() in ["queues", "webserver"]:
        task.cancel()
else:
logger.debug("Connection closed on purpose.")
async def is_ready(self) -> bool:
if self.connection is None or self.connection.is_closed:
try:
await self.connect()
except Exception as e:
logger.error(f"Failed to connect to RabbitMQ: {e}")
return False
return True
@retry(tries=5, exceptions=(AMQPConnectionError, ChannelInvalidStateError), reraise=True, logger=logger)
async def setup_exchanges(self) -> None:
self.tenant_exchange = await self.channel.declare_exchange(
self.config.tenant_exchange_name, ExchangeType.TOPIC, durable=True
)
self.input_exchange = await self.channel.declare_exchange(
self.config.service_request_exchange_name, ExchangeType.DIRECT, durable=True
)
self.output_exchange = await self.channel.declare_exchange(
self.config.service_response_exchange_name, ExchangeType.DIRECT, durable=True
)
# we must declare DLQ to handle error messages
self.dead_letter_queue = await self.channel.declare_queue(
self.config.service_dead_letter_queue_name, durable=True
)
@retry(tries=5, exceptions=(AMQPConnectionError, ChannelInvalidStateError), reraise=True, logger=logger)
async def setup_tenant_queue(self) -> None:
self.tenant_exchange_queue = await self.channel.declare_queue(
f"{self.config.pod_name}_{self.config.tenant_event_queue_suffix}",
durable=True,
arguments={
"x-dead-letter-exchange": "",
"x-dead-letter-routing-key": self.config.service_dead_letter_queue_name,
"x-expires": self.config.queue_expiration_time,
},
)
await self.tenant_exchange_queue.bind(self.tenant_exchange, routing_key="tenant.*")
self.consumer_tags["tenant_exchange_queue"] = await self.tenant_exchange_queue.consume(
self.process_tenant_message
)
async def process_tenant_message(self, message: AbstractIncomingMessage) -> None:
try:
async with message.process():
message_body = json.loads(message.body.decode())
logger.debug(f"Tenant message received: {message_body}")
tenant_id = message_body["tenantId"]
routing_key = message.routing_key
if routing_key == "tenant.created":
await self.create_tenant_queues(tenant_id)
elif routing_key == "tenant.delete":
await self.delete_tenant_queues(tenant_id)
except Exception as e:
logger.error(e, exc_info=True)
async def create_tenant_queues(self, tenant_id: str) -> None:
queue_name = f"{self.config.input_queue_prefix}_{tenant_id}"
logger.info(f"Declaring queue: {queue_name}")
try:
input_queue = await self.channel.declare_queue(
queue_name,
durable=True,
arguments={
"x-dead-letter-exchange": "",
"x-dead-letter-routing-key": self.config.service_dead_letter_queue_name,
},
)
await input_queue.bind(self.input_exchange, routing_key=tenant_id)
self.consumer_tags[tenant_id] = await input_queue.consume(self.process_input_message)
self.tenant_queues[tenant_id] = input_queue
logger.info(f"Created and started consuming queue for tenant {tenant_id}")
except Exception as e:
logger.error(e, exc_info=True)
async def delete_tenant_queues(self, tenant_id: str) -> None:
if tenant_id in self.tenant_queues:
# somehow queue.delete() does not work here
await self.channel.queue_delete(f"{self.config.input_queue_prefix}_{tenant_id}")
del self.tenant_queues[tenant_id]
del self.consumer_tags[tenant_id]
logger.info(f"Deleted queues for tenant {tenant_id}")
async def process_input_message(self, message: IncomingMessage) -> None:
async def process_message_body_and_await_result(unpacked_message_body):
async with self.semaphore:
loop = asyncio.get_running_loop()
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor:
logger.info("Processing payload in a separate thread.")
result = await loop.run_in_executor(
thread_pool_executor, self.message_processor, unpacked_message_body
)
return result
async with message.process(ignore_processed=True):
if message.redelivered:
logger.warning(f"Declining message with {message.delivery_tag=} due to it being redelivered.")
await message.nack(requeue=False)
return
if message.body.decode("utf-8") == "STOP":
logger.info("Received stop signal, stopping consumption...")
await message.ack()
# TODO: shutdown is probably not the right call here - align w/ Dev what should happen on stop signal
await self.shutdown()
return
self.message_count += 1
try:
tenant_id = message.routing_key
filtered_message_headers = (
{k: v for k, v in message.headers.items() if k.lower().startswith("x-")} if message.headers else {}
)
logger.debug(f"Processing message with {filtered_message_headers=}.")
result: dict = (
await process_message_body_and_await_result({**json.loads(message.body), **filtered_message_headers})
or {}
)
if result:
await self.publish_to_output_exchange(tenant_id, result, filtered_message_headers)
await message.ack()
logger.debug(f"Message with {message.delivery_tag=} acknowledged.")
else:
raise ValueError(f"Could not process message with {message.body=}.")
except json.JSONDecodeError:
await message.nack(requeue=False)
logger.error(f"Invalid JSON in input message: {message.body}", exc_info=True)
except FileNotFoundError as e:
logger.warning(f"{e}, declining message with {message.delivery_tag=}.", exc_info=True)
await message.nack(requeue=False)
except Exception as e:
await message.nack(requeue=False)
logger.error(f"Error processing input message: {e}", exc_info=True)
finally:
self.message_count -= 1
async def publish_to_output_exchange(self, tenant_id: str, result: Dict[str, Any], headers: Dict[str, Any]) -> None:
await self.output_exchange.publish(
Message(body=json.dumps(result).encode(), headers=headers),
routing_key=tenant_id,
)
logger.info(f"Published result to queue {tenant_id}.")
@retry(tries=5, exceptions=(aiohttp.ClientResponseError, aiohttp.ClientConnectorError), reraise=True, logger=logger)
async def fetch_active_tenants(self) -> Set[str]:
async with aiohttp.ClientSession() as session:
async with session.get(self.tenant_service_url) as response:
response.raise_for_status()
if response.headers["content-type"].lower() == "application/json":
data = await response.json()
return {tenant["tenantId"] for tenant in data}
else:
logger.error(
f"Failed to fetch active tenants. Content type is not JSON: {response.headers['content-type'].lower()}"
)
return set()
@retry(
tries=5,
exceptions=(
AMQPConnectionError,
ChannelInvalidStateError,
),
reraise=True,
logger=logger,
)
async def initialize_tenant_queues(self, active_tenants: set) -> None:
for tenant_id in active_tenants:
await self.create_tenant_queues(tenant_id)
async def run(self, active_tenants: set) -> None:
await self.connect()
await self.setup_exchanges()
await self.initialize_tenant_queues(active_tenants=active_tenants)
await self.setup_tenant_queue()
logger.info("RabbitMQ handler is running. Press CTRL+C to exit.")
async def close_channels(self) -> None:
try:
if self.channel and not self.channel.is_closed:
# Cancel queues to stop fetching messages
logger.debug("Cancelling queues...")
for tenant, queue in self.tenant_queues.items():
await queue.cancel(self.consumer_tags[tenant])
if self.tenant_exchange_queue:
await self.tenant_exchange_queue.cancel(self.consumer_tags["tenant_exchange_queue"])
while self.message_count != 0:
logger.debug(f"Messages are still being processed: {self.message_count=} ")
await asyncio.sleep(2)
await self.channel.close(exc=asyncio.CancelledError)
logger.debug("Channel closed.")
else:
logger.debug("No channel to close.")
except ChannelClosed:
logger.warning("Channel was already closed.")
except ConnectionClosed:
logger.warning("Connection was lost, unable to close channel.")
except Exception as e:
logger.error(f"Error during channel shutdown: {e}")
async def close_connection(self) -> None:
try:
if self.connection and not self.connection.is_closed:
await self.connection.close(exc=asyncio.CancelledError)
logger.debug("Connection closed.")
else:
logger.debug("No connection to close.")
except ConnectionClosed:
logger.warning("Connection was already closed.")
except Exception as e:
logger.error(f"Error closing connection: {e}")
async def shutdown(self) -> None:
logger.info("Shutting down RabbitMQ handler...")
await self.close_channels()
await self.close_connection()
logger.info("RabbitMQ handler shut down successfully.")

42
pyinfra/queue/callback.py Normal file
@ -0,0 +1,42 @@
from typing import Callable
from dynaconf import Dynaconf
from kn_utils.logging import logger
from pyinfra.storage.connection import get_storage
from pyinfra.storage.utils import (
download_data_bytes_as_specified_in_message,
upload_data_as_specified_in_message,
DownloadedData,
)
DataProcessor = Callable[[dict[str, DownloadedData] | DownloadedData, dict], dict | list | str]
Callback = Callable[[dict], dict]
def make_download_process_upload_callback(data_processor: DataProcessor, settings: Dynaconf) -> Callback:
"""Default callback for processing queue messages.
Data will be downloaded from the storage as specified in the message. If a tenant id is specified, the storage
will be configured to use that tenant id, otherwise the storage is configured as specified in the settings.
The data is then passed to the data processor, together with the message. The data processor should return a
json serializable object. This object is then uploaded to the storage as specified in the message. The response
message is just the original message.
"""
def inner(queue_message_payload: dict) -> dict:
logger.info(f"Processing payload with download-process-upload callback...")
storage = get_storage(settings, queue_message_payload.get("X-TENANT-ID"))
data: dict[str, DownloadedData] | DownloadedData = download_data_bytes_as_specified_in_message(
storage, queue_message_payload
)
result = data_processor(data, queue_message_payload)
upload_data_as_specified_in_message(storage, queue_message_payload, result)
return queue_message_payload
return inner
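
Example (not part of the diff): a usage sketch for the callback factory; the processor and the payload keys below are illustrative.

from dynaconf import Dynaconf

from pyinfra.queue.callback import make_download_process_upload_callback

def word_count(downloaded, payload):
    # 'downloaded' is a DownloadedData dict (raw bytes under 'data'), or a dict of them
    # when several files are requested; the result must be json serializable.
    return {"words": len(downloaded["data"].split())}

settings = Dynaconf(settings_files=["settings.toml"])  # hypothetical settings file
callback = make_download_process_upload_callback(word_count, settings)
# callback({"dossierId": "d1", "fileId": "f1", "targetFileExtension": "txt",
#           "responseFileExtension": "json"}) downloads d1/f1.txt, runs word_count
# and uploads the result to d1/f1.json.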

@ -1,16 +0,0 @@
from pyinfra.queue.queue_manager.queue_manager import QueueManager
class Consumer:
def __init__(self, callback, queue_manager: QueueManager):
self.queue_manager = queue_manager
self.callback = callback
def consume_and_publish(self):
self.queue_manager.consume_and_publish(self.callback)
def basic_consume_and_publish(self):
self.queue_manager.basic_consume_and_publish(self.callback)
def consume(self, **kwargs):
return self.queue_manager.consume(**kwargs)

229
pyinfra/queue/manager.py Normal file
@ -0,0 +1,229 @@
import atexit
import concurrent.futures
import json
import logging
import signal
import sys
from typing import Callable, Union
import pika
import pika.exceptions
from dynaconf import Dynaconf
from kn_utils.logging import logger
from kn_utils.retry import retry
from pika.adapters.blocking_connection import BlockingChannel, BlockingConnection
from pyinfra.config.loader import validate_settings
from pyinfra.config.validators import queue_manager_validators
pika_logger = logging.getLogger("pika")
pika_logger.setLevel(logging.WARNING) # disables non-informative pika log clutter
MessageProcessor = Callable[[dict], dict]
class QueueManager:
def __init__(self, settings: Dynaconf):
validate_settings(settings, queue_manager_validators)
self.input_queue = settings.rabbitmq.input_queue
self.output_queue = settings.rabbitmq.output_queue
self.dead_letter_queue = settings.rabbitmq.dead_letter_queue
self.connection_parameters = self.create_connection_parameters(settings)
self.connection: Union[BlockingConnection, None] = None
self.channel: Union[BlockingChannel, None] = None
self.connection_sleep = settings.rabbitmq.connection_sleep
self.processing_callback = False
self.received_signal = False
atexit.register(self.stop_consuming)
signal.signal(signal.SIGTERM, self._handle_stop_signal)
signal.signal(signal.SIGINT, self._handle_stop_signal)
self.max_retries = settings.rabbitmq.get("max_retries", 5)
self.max_delay = settings.rabbitmq.get("max_delay", 60)
@staticmethod
def create_connection_parameters(settings: Dynaconf):
credentials = pika.PlainCredentials(username=settings.rabbitmq.username, password=settings.rabbitmq.password)
pika_connection_params = {
"host": settings.rabbitmq.host,
"port": settings.rabbitmq.port,
"credentials": credentials,
"heartbeat": settings.rabbitmq.heartbeat,
}
return pika.ConnectionParameters(**pika_connection_params)
@retry(
tries=5,
exceptions=(pika.exceptions.AMQPConnectionError, pika.exceptions.ChannelClosedByBroker),
reraise=True,
)
def establish_connection(self):
if self.connection and self.connection.is_open:
logger.debug("Connection to RabbitMQ already established.")
return
logger.info("Establishing connection to RabbitMQ...")
self.connection = pika.BlockingConnection(parameters=self.connection_parameters)
logger.debug("Opening channel...")
self.channel = self.connection.channel()
self.channel.basic_qos(prefetch_count=1)
args = {
"x-dead-letter-exchange": "",
"x-dead-letter-routing-key": self.dead_letter_queue,
}
self.channel.queue_declare(self.input_queue, arguments=args, auto_delete=False, durable=True)
self.channel.queue_declare(self.output_queue, arguments=args, auto_delete=False, durable=True)
logger.info("Connection to RabbitMQ established, channel open.")
def is_ready(self):
try:
self.establish_connection()
return self.channel.is_open
except Exception as e:
logger.error(f"Failed to establish connection: {e}")
return False
@retry(
tries=5,
exceptions=pika.exceptions.AMQPConnectionError,
reraise=True,
)
def start_consuming(self, message_processor: Callable):
on_message_callback = self._make_on_message_callback(message_processor)
try:
self.establish_connection()
self.channel.basic_consume(self.input_queue, on_message_callback)
logger.info("Starting to consume messages...")
self.channel.start_consuming()
except pika.exceptions.AMQPConnectionError as e:
logger.error(f"AMQP Connection Error: {e}")
raise
except Exception as e:
logger.error(f"An unexpected error occurred while consuming messages: {e}", exc_info=True)
raise
finally:
self.stop_consuming()
def stop_consuming(self):
if self.channel and self.channel.is_open:
logger.info("Stopping consuming...")
self.channel.stop_consuming()
logger.info("Closing channel...")
self.channel.close()
if self.connection and self.connection.is_open:
logger.info("Closing connection to RabbitMQ...")
self.connection.close()
def publish_message_to_input_queue(self, message: Union[str, bytes, dict], properties: pika.BasicProperties = None):
if isinstance(message, str):
message = message.encode("utf-8")
elif isinstance(message, dict):
message = json.dumps(message).encode("utf-8")
self.establish_connection()
self.channel.basic_publish(
"",
self.input_queue,
properties=properties,
body=message,
)
logger.info(f"Published message to queue {self.input_queue}.")
def purge_queues(self):
self.establish_connection()
try:
self.channel.queue_purge(self.input_queue)
self.channel.queue_purge(self.output_queue)
logger.info("Queues purged.")
except pika.exceptions.ChannelWrongStateError:
pass
def get_message_from_output_queue(self):
self.establish_connection()
return self.channel.basic_get(self.output_queue, auto_ack=True)
def _make_on_message_callback(self, message_processor: MessageProcessor):
def process_message_body_and_await_result(unpacked_message_body):
# Processing the message in a separate thread is necessary for the main thread pika client to be able to
# process data events (e.g. heartbeats) while the message is being processed.
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_pool_executor:
logger.info("Processing payload in separate thread.")
future = thread_pool_executor.submit(message_processor, unpacked_message_body)
# TODO: This block is probably not necessary, but kept since the implications of removing it are
# unclear. Remove it in a future iteration where less changes are being made to the code base.
while future.running():
logger.debug("Waiting for payload processing to finish...")
self.connection.sleep(self.connection_sleep)
return future.result()
def on_message_callback(channel, method, properties, body):
logger.info(f"Received message from queue with delivery_tag {method.delivery_tag}.")
self.processing_callback = True
if method.redelivered:
logger.warning(f"Declining message with {method.delivery_tag=} due to it being redelivered.")
channel.basic_nack(method.delivery_tag, requeue=False)
return
if body.decode("utf-8") == "STOP":
logger.info(f"Received stop signal, stopping consuming...")
channel.basic_ack(delivery_tag=method.delivery_tag)
self.stop_consuming()
return
try:
filtered_message_headers = (
{k: v for k, v in properties.headers.items() if k.lower().startswith("x-")}
if properties.headers
else {}
)
logger.debug(f"Processing message with {filtered_message_headers=}.")
result: dict = (
process_message_body_and_await_result({**json.loads(body), **filtered_message_headers}) or {}
)
channel.basic_publish(
"",
self.output_queue,
json.dumps(result).encode(),
properties=pika.BasicProperties(headers=filtered_message_headers),
)
logger.info(f"Published result to queue {self.output_queue}.")
channel.basic_ack(delivery_tag=method.delivery_tag)
logger.debug(f"Message with {method.delivery_tag=} acknowledged.")
except FileNotFoundError as e:
logger.warning(f"{e}, declining message with {method.delivery_tag=}.")
channel.basic_nack(method.delivery_tag, requeue=False)
except Exception:
logger.warning(f"Failed to process message with {method.delivery_tag=}, declining...", exc_info=True)
channel.basic_nack(method.delivery_tag, requeue=False)
raise
finally:
self.processing_callback = False
if self.received_signal:
self.stop_consuming()
sys.exit(0)
return on_message_callback
def _handle_stop_signal(self, signum, *args, **kwargs):
logger.info(f"Received signal {signum}, stopping consuming...")
self.received_signal = True
if not self.processing_callback:
self.stop_consuming()
sys.exit(0)
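
Example (not part of the diff): a minimal sketch of using the blocking QueueManager on its own; the settings file is hypothetical and must provide the keys required by queue_manager_validators.

from dynaconf import Dynaconf

from pyinfra.queue.manager import QueueManager

# Hypothetical settings file with rabbitmq.host/port/username/password/heartbeat/connection_sleep
# and the input, output and dead letter queue names.
settings = Dynaconf(settings_files=["settings.toml"])

manager = QueueManager(settings)
manager.publish_message_to_input_queue({"ping": "pong"})
manager.start_consuming(lambda payload: {"echo": payload})  # blocks until a stop signal arrives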

@ -1,162 +0,0 @@
import json
import logging
import time
import pika
from pyinfra.config import CONFIG
from pyinfra.exceptions import ProcessingFailure, DataLoadingFailure
from pyinfra.queue.queue_manager.queue_manager import QueueHandle, QueueManager
logger = logging.getLogger("pika")
logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
logger.setLevel(CONFIG.service.logging_level)
def monkey_patch_queue_handle(channel, queue) -> QueueHandle:
empty_message = (None, None, None)
def is_empty_message(message):
return message == empty_message
queue_handle = QueueHandle()
queue_handle.empty = lambda: is_empty_message(channel.basic_get(queue))
def produce_items():
while True:
message = channel.basic_get(queue)
if is_empty_message(message):
break
method_frame, properties, body = message
channel.basic_ack(method_frame.delivery_tag)
yield json.loads(body)
queue_handle.to_list = lambda: list(produce_items())
return queue_handle
def get_connection_params():
credentials = pika.PlainCredentials(username=CONFIG.rabbitmq.user, password=CONFIG.rabbitmq.password)
kwargs = {
"host": CONFIG.rabbitmq.host,
"port": CONFIG.rabbitmq.port,
"credentials": credentials,
"heartbeat": CONFIG.rabbitmq.heartbeat,
}
parameters = pika.ConnectionParameters(**kwargs)
return parameters
def get_n_previous_attempts(props):
return 0 if props.headers is None else props.headers.get("x-retry-count", 0)
def attempts_remain(n_attempts, max_attempts):
return n_attempts < max_attempts
class PikaQueueManager(QueueManager):
def __init__(self, input_queue, output_queue, dead_letter_queue=None, connection_params=None):
super().__init__(input_queue, output_queue)
if not connection_params:
connection_params = get_connection_params()
self.connection = pika.BlockingConnection(parameters=connection_params)
self.channel = self.connection.channel()
self.channel.basic_qos(prefetch_count=1)
if not dead_letter_queue:
dead_letter_queue = CONFIG.rabbitmq.queues.dead_letter
args = {"x-dead-letter-exchange": "", "x-dead-letter-routing-key": dead_letter_queue}
self.channel.queue_declare(input_queue, arguments=args, auto_delete=False, durable=True)
self.channel.queue_declare(output_queue, arguments=args, auto_delete=False, durable=True)
def republish(self, body, n_current_attempts, frame):
self.channel.basic_publish(
exchange="",
routing_key=self._input_queue,
body=body,
properties=pika.BasicProperties(headers={"x-retry-count": n_current_attempts}),
)
self.channel.basic_ack(delivery_tag=frame.delivery_tag)
def publish_request(self, request):
logger.debug(f"Publishing {request}")
self.channel.basic_publish("", self._input_queue, json.dumps(request).encode())
def reject(self, body, frame):
logger.error(f"Adding to dead letter queue: {body}")
self.channel.basic_reject(delivery_tag=frame.delivery_tag, requeue=False)
def publish_response(self, message, callback, max_attempts=3):
logger.debug(f"Processing {message}.")
frame, properties, body = message
n_attempts = get_n_previous_attempts(properties) + 1
try:
response = json.dumps(callback(json.loads(body)))
self.channel.basic_publish("", self._output_queue, response.encode())
self.channel.basic_ack(frame.delivery_tag)
except (ProcessingFailure, DataLoadingFailure):
logger.error(f"Message failed to process {n_attempts}/{max_attempts} times: {body}")
if attempts_remain(n_attempts, max_attempts):
self.republish(body, n_attempts, frame)
else:
self.reject(body, frame)
def pull_request(self):
return self.channel.basic_get(self._input_queue)
def consume(self, inactivity_timeout=None):
logger.debug("Consuming")
return self.channel.consume(self._input_queue, inactivity_timeout=inactivity_timeout)
def consume_and_publish(self, visitor):
logger.info(f"Consuming with callback {visitor.callback.__name__}")
for message in self.consume():
self.publish_response(message, visitor)
def basic_consume_and_publish(self, visitor):
logger.info(f"Basic consuming with callback {visitor.callback.__name__}")
def callback(channel, frame, properties, body):
message = (frame, properties, body)
return self.publish_response(message, visitor)
self.channel.basic_consume(self._input_queue, callback)
self.channel.start_consuming()
def clear(self):
try:
self.channel.queue_purge(self._input_queue)
self.channel.queue_purge(self._output_queue)
except pika.exceptions.ChannelWrongStateError:
pass
@property
def input_queue(self) -> QueueHandle:
return monkey_patch_queue_handle(self.channel, self._input_queue)
@property
def output_queue(self) -> QueueHandle:
return monkey_patch_queue_handle(self.channel, self._output_queue)

@ -1,51 +0,0 @@
import abc
class QueueHandle:
def empty(self) -> bool:
raise NotImplementedError
def to_list(self) -> list:
raise NotImplementedError
class QueueManager(abc.ABC):
def __init__(self, input_queue, output_queue):
self._input_queue = input_queue
self._output_queue = output_queue
@abc.abstractmethod
def publish_request(self, request):
raise NotImplementedError
@abc.abstractmethod
def publish_response(self, response, callback):
raise NotImplementedError
@abc.abstractmethod
def pull_request(self):
raise NotImplementedError
@abc.abstractmethod
def consume(self, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def clear(self):
raise NotImplementedError
@abc.abstractmethod
def input_queue(self) -> QueueHandle:
raise NotImplementedError
@abc.abstractmethod
def output_queue(self) -> QueueHandle:
raise NotImplementedError
@abc.abstractmethod
def consume_and_publish(self, callback):
raise NotImplementedError
@abc.abstractmethod
def basic_consume_and_publish(self, callback):
raise NotImplementedError

@ -1,34 +0,0 @@
from abc import ABC, abstractmethod
class StorageAdapter(ABC):
def __init__(self, client):
self.__client = client
@abstractmethod
def make_bucket(self, bucket_name):
raise NotImplementedError
@abstractmethod
def has_bucket(self, bucket_name):
raise NotImplementedError
@abstractmethod
def put_object(self, bucket_name, object_name, data):
raise NotImplementedError
@abstractmethod
def get_object(self, bucket_name, object_name):
raise NotImplementedError
@abstractmethod
def get_all_objects(self, bucket_name):
raise NotImplementedError
@abstractmethod
def clear_bucket(self, bucket_name):
raise NotImplementedError
@abstractmethod
def get_all_object_names(self, bucket_name):
raise NotImplementedError

@ -1,65 +0,0 @@
import logging
from itertools import repeat
from operator import attrgetter
from azure.storage.blob import ContainerClient, BlobServiceClient
from pyinfra.storage.adapters.adapter import StorageAdapter
logger = logging.getLogger(__name__)
logging.getLogger("azure").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class AzureStorageAdapter(StorageAdapter):
def __init__(self, client):
super().__init__(client=client)
self.__client: BlobServiceClient = self._StorageAdapter__client
def has_bucket(self, bucket_name):
container_client = self.__client.get_container_client(bucket_name)
return container_client.exists()
def make_bucket(self, bucket_name):
container_client = self.__client.get_container_client(bucket_name)
container_client if container_client.exists() else self.__client.create_container(bucket_name)
def __provide_container_client(self, bucket_name) -> ContainerClient:
self.make_bucket(bucket_name)
container_client = self.__client.get_container_client(bucket_name)
return container_client
def put_object(self, bucket_name, object_name, data):
logger.debug(f"Uploading '{object_name}'...")
container_client = self.__provide_container_client(bucket_name)
blob_client = container_client.get_blob_client(object_name)
blob_client.upload_blob(data, overwrite=True)
def get_object(self, bucket_name, object_name):
logger.debug(f"Downloading '{object_name}'...")
container_client = self.__provide_container_client(bucket_name)
blob_client = container_client.get_blob_client(object_name)
blob_data = blob_client.download_blob()
return blob_data.readall()
def get_all_objects(self, bucket_name):
container_client = self.__provide_container_client(bucket_name)
blobs = container_client.list_blobs()
for blob in blobs:
logger.debug(f"Downloading '{blob.name}'...")
blob_client = container_client.get_blob_client(blob)
blob_data = blob_client.download_blob()
data = blob_data.readall()
yield data
def clear_bucket(self, bucket_name):
logger.debug(f"Clearing Azure container '{bucket_name}'...")
container_client = self.__client.get_container_client(bucket_name)
blobs = container_client.list_blobs()
container_client.delete_blobs(*blobs)
def get_all_object_names(self, bucket_name):
container_client = self.__provide_container_client(bucket_name)
blobs = container_client.list_blobs()
return zip(repeat(bucket_name), map(attrgetter("name"), blobs))

@ -1,58 +0,0 @@
import io
from itertools import repeat
import logging
from operator import attrgetter
from minio import Minio
from pyinfra.exceptions import DataLoadingFailure
from pyinfra.storage.adapters.adapter import StorageAdapter
logger = logging.getLogger(__name__)
class S3StorageAdapter(StorageAdapter):
def __init__(self, client):
super().__init__(client=client)
self.__client: Minio = self._StorageAdapter__client
def make_bucket(self, bucket_name):
if not self.has_bucket(bucket_name):
self.__client.make_bucket(bucket_name)
def has_bucket(self, bucket_name):
return self.__client.bucket_exists(bucket_name)
def put_object(self, bucket_name, object_name, data):
logger.debug(f"Uploading '{object_name}'...")
data = io.BytesIO(data)
self.__client.put_object(bucket_name, object_name, data, length=data.getbuffer().nbytes)
def get_object(self, bucket_name, object_name):
logger.debug(f"Downloading '{object_name}'...")
response = None
try:
response = self.__client.get_object(bucket_name, object_name)
return response.data
except Exception as err:
raise DataLoadingFailure("Failed getting object from s3 client") from err
finally:
if response:
response.close()
response.release_conn()
def get_all_objects(self, bucket_name):
for obj in self.__client.list_objects(bucket_name, recursive=True):
logger.debug(f"Downloading '{obj.object_name}'...")
yield self.get_object(bucket_name, obj.object_name)
def clear_bucket(self, bucket_name):
logger.debug(f"Clearing S3 bucket '{bucket_name}'...")
objects = self.__client.list_objects(bucket_name, recursive=True)
for obj in objects:
self.__client.remove_object(bucket_name, obj.object_name)
def get_all_object_names(self, bucket_name):
objs = self.__client.list_objects(bucket_name, recursive=True)
return zip(repeat(bucket_name), map(attrgetter("object_name"), objs))

@ -1,11 +0,0 @@
from azure.storage.blob import BlobServiceClient
from pyinfra.config import CONFIG
def get_azure_client(connection_string=None) -> BlobServiceClient:
if not connection_string:
connection_string = CONFIG.storage.azure.connection_string
return BlobServiceClient.from_connection_string(conn_str=connection_string)

@ -1,40 +0,0 @@
import re
from minio import Minio
from pyinfra.config import CONFIG
from pyinfra.exceptions import InvalidEndpoint
def parse_endpoint(endpoint):
# FIXME Greedy matching (.+) since we get random storage names on kubernetes (eg http://red-research-headless:9000)
# FIXME this has been broken and accepts invalid URLs
endpoint_pattern = r"(?P<protocol>https?)*(?:://)*(?P<address>(?:(?:(?:\d{1,3}\.){3}\d{1,3})|.+)(?:\:\d+)?)"
match = re.match(endpoint_pattern, endpoint)
if not match:
raise InvalidEndpoint(f"Endpoint {endpoint} is invalid; expected {endpoint_pattern}")
return {"secure": match.group("protocol") == "https", "endpoint": match.group("address")}
def get_s3_client(params=None) -> Minio:
"""
Args:
params: dict like
{
"endpoint": <storage_endpoint>
"access_key": <storage_key>
"secret_key": <storage_secret>
}
"""
if not params:
params = CONFIG.storage.s3
return Minio(
**parse_endpoint(params.endpoint),
access_key=params.access_key,
secret_key=params.secret_key,
region=params.region,
)

@ -0,0 +1,89 @@
from functools import lru_cache
import requests
from dynaconf import Dynaconf
from kn_utils.logging import logger
from pyinfra.config.loader import validate_settings
from pyinfra.config.validators import (
multi_tenant_storage_validators,
storage_validators,
)
from pyinfra.storage.storages.azure import get_azure_storage_from_settings
from pyinfra.storage.storages.s3 import get_s3_storage_from_settings
from pyinfra.storage.storages.storage import Storage
from pyinfra.utils.cipher import decrypt
def get_storage(settings: Dynaconf, tenant_id: str = None) -> Storage:
"""Establishes a storage connection.
If tenant_id is provided, gets storage connection information from tenant server. These connections are cached.
Otherwise, gets storage connection information from settings.
"""
logger.info("Establishing storage connection...")
if tenant_id:
logger.info(f"Using tenant storage for {tenant_id}.")
validate_settings(settings, multi_tenant_storage_validators)
return get_storage_for_tenant(
tenant_id,
settings.storage.tenant_server.endpoint,
settings.storage.tenant_server.public_key,
)
logger.info("Using default storage.")
validate_settings(settings, storage_validators)
return storage_dispatcher[settings.storage.backend](settings)
storage_dispatcher = {
"azure": get_azure_storage_from_settings,
"s3": get_s3_storage_from_settings,
}
@lru_cache(maxsize=10)
def get_storage_for_tenant(tenant: str, endpoint: str, public_key: str) -> Storage:
response = requests.get(f"{endpoint}/{tenant}").json()
maybe_azure = response.get("azureStorageConnection")
maybe_s3 = response.get("s3StorageConnection")
assert (maybe_azure or maybe_s3) and not (maybe_azure and maybe_s3), "Only one storage backend can be used."
if maybe_azure:
connection_string = decrypt(public_key, maybe_azure["connectionString"])
backend = "azure"
storage_info = {
"storage": {
"azure": {
"connection_string": connection_string,
"container": maybe_azure["containerName"],
},
}
}
elif maybe_s3:
secret = decrypt(public_key, maybe_s3["secret"])
backend = "s3"
storage_info = {
"storage": {
"s3": {
"endpoint": maybe_s3["endpoint"],
"key": maybe_s3["key"],
"secret": secret,
"region": maybe_s3["region"],
"bucket": maybe_s3["bucketName"],
},
}
}
else:
raise Exception(f"Unknown storage backend in {response}.")
storage_settings = Dynaconf()
storage_settings.update(storage_info)
storage = storage_dispatcher[backend](storage_settings)
return storage
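
Example (not part of the diff): resolving a storage connection from settings; the keys follow storage_validators and s3_storage_validators, the values are placeholders.

from dynaconf import Dynaconf

from pyinfra.storage.connection import get_storage

settings = Dynaconf()
settings.update({
    "storage": {
        "backend": "s3",
        "s3": {
            "endpoint": "http://localhost:9000",  # placeholder MinIO endpoint
            "key": "minio",
            "secret": "minio123",
            "region": "eu-central-1",
            "bucket": "research",
        },
    }
})

storage = get_storage(settings)                      # default storage from settings
# storage = get_storage(settings, tenant_id="t-42")  # tenant storage resolved via the tenant server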

@ -1,44 +0,0 @@
import logging
from retry import retry
from pyinfra.config import CONFIG
from pyinfra.exceptions import DataLoadingFailure
from pyinfra.storage.adapters.adapter import StorageAdapter
logger = logging.getLogger(__name__)
logger.setLevel(CONFIG.service.logging_level)
class Storage:
def __init__(self, adapter: StorageAdapter):
self.__adapter = adapter
def make_bucket(self, bucket_name):
self.__adapter.make_bucket(bucket_name)
def has_bucket(self, bucket_name):
return self.__adapter.has_bucket(bucket_name)
def put_object(self, bucket_name, object_name, data):
self.__adapter.put_object(bucket_name, object_name, data)
def get_object(self, bucket_name, object_name):
return self.__get_object(bucket_name, object_name)
@retry(DataLoadingFailure, tries=3, delay=5, jitter=(1, 3))
def __get_object(self, bucket_name, object_name):
try:
return self.__adapter.get_object(bucket_name, object_name)
except Exception as err:
logging.error(err)
raise DataLoadingFailure from err
def get_all_objects(self, bucket_name):
return self.__adapter.get_all_objects(bucket_name)
def clear_bucket(self, bucket_name):
return self.__adapter.clear_bucket(bucket_name)
def get_all_object_names(self, bucket_name):
return self.__adapter.get_all_object_names(bucket_name)

@ -1,26 +0,0 @@
from pyinfra.exceptions import UnknownStorageBackend
from pyinfra.storage.adapters.azure import AzureStorageAdapter
from pyinfra.storage.adapters.s3 import S3StorageAdapter
from pyinfra.storage.clients.azure import get_azure_client
from pyinfra.storage.clients.s3 import get_s3_client
from pyinfra.storage.storage import Storage
def get_azure_storage(config=None):
return Storage(AzureStorageAdapter(get_azure_client(config)))
def get_s3_storage(config=None):
return Storage(S3StorageAdapter(get_s3_client(config)))
def get_storage(storage_backend):
if storage_backend == "s3":
storage = get_s3_storage()
elif storage_backend == "azure":
storage = get_azure_storage()
else:
raise UnknownStorageBackend(f"Unknown storage backend '{storage_backend}'.")
return storage

@ -0,0 +1,91 @@
import logging
from itertools import repeat
from operator import attrgetter
from azure.storage.blob import BlobServiceClient, ContainerClient
from dynaconf import Dynaconf
from kn_utils.logging import logger
from retry import retry
from pyinfra.config.loader import validate_settings
from pyinfra.config.validators import azure_storage_validators
from pyinfra.storage.storages.storage import Storage
logging.getLogger("azure").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class AzureStorage(Storage):
def __init__(self, client: BlobServiceClient, bucket: str):
self._client: BlobServiceClient = client
self._bucket = bucket
@property
def bucket(self):
return self._bucket
def has_bucket(self):
container_client = self._client.get_container_client(self.bucket)
return container_client.exists()
def make_bucket(self):
container_client = self._client.get_container_client(self.bucket)
if not container_client.exists():
    self._client.create_container(self.bucket)
def __provide_container_client(self) -> ContainerClient:
self.make_bucket()
container_client = self._client.get_container_client(self.bucket)
return container_client
def put_object(self, object_name, data):
logger.debug(f"Uploading '{object_name}'...")
container_client = self.__provide_container_client()
blob_client = container_client.get_blob_client(object_name)
blob_client.upload_blob(data, overwrite=True)
def exists(self, object_name):
container_client = self.__provide_container_client()
blob_client = container_client.get_blob_client(object_name)
return blob_client.exists()
@retry(tries=3, delay=5, jitter=(1, 3))
def get_object(self, object_name):
logger.debug(f"Downloading '{object_name}'...")
try:
container_client = self.__provide_container_client()
blob_client = container_client.get_blob_client(object_name)
blob_data = blob_client.download_blob()
return blob_data.readall()
except Exception as err:
raise Exception("Failed getting object from azure client") from err
def get_all_objects(self):
container_client = self.__provide_container_client()
blobs = container_client.list_blobs()
for blob in blobs:
logger.debug(f"Downloading '{blob.name}'...")
blob_client = container_client.get_blob_client(blob)
blob_data = blob_client.download_blob()
data = blob_data.readall()
yield data
def clear_bucket(self):
logger.debug(f"Clearing Azure container '{self.bucket}'...")
container_client = self._client.get_container_client(self.bucket)
blobs = container_client.list_blobs()
container_client.delete_blobs(*blobs)
def get_all_object_names(self):
container_client = self.__provide_container_client()
blobs = container_client.list_blobs()
return zip(repeat(self.bucket), map(attrgetter("name"), blobs))
def get_azure_storage_from_settings(settings: Dynaconf):
validate_settings(settings, azure_storage_validators)
return AzureStorage(
client=BlobServiceClient.from_connection_string(conn_str=settings.storage.azure.connection_string),
bucket=settings.storage.azure.container,
)

@ -0,0 +1,89 @@
import io
from itertools import repeat
from operator import attrgetter
from dynaconf import Dynaconf
from kn_utils.logging import logger
from minio import Minio
from retry import retry
from pyinfra.config.loader import validate_settings
from pyinfra.config.validators import s3_storage_validators
from pyinfra.storage.storages.storage import Storage
from pyinfra.utils.url_parsing import validate_and_parse_s3_endpoint
class S3Storage(Storage):
def __init__(self, client: Minio, bucket: str):
self._client = client
self._bucket = bucket
@property
def bucket(self):
return self._bucket
def make_bucket(self):
if not self.has_bucket():
self._client.make_bucket(self.bucket)
def has_bucket(self):
return self._client.bucket_exists(self.bucket)
def put_object(self, object_name, data):
logger.debug(f"Uploading '{object_name}'...")
data = io.BytesIO(data)
self._client.put_object(self.bucket, object_name, data, length=data.getbuffer().nbytes)
def exists(self, object_name):
try:
self._client.stat_object(self.bucket, object_name)
return True
except Exception:
return False
@retry(tries=3, delay=5, jitter=(1, 3))
def get_object(self, object_name):
logger.debug(f"Downloading '{object_name}'...")
response = None
try:
response = self._client.get_object(self.bucket, object_name)
return response.data
except Exception as err:
raise Exception("Failed getting object from s3 client") from err
finally:
if response:
response.close()
response.release_conn()
def get_all_objects(self):
for obj in self._client.list_objects(self.bucket, recursive=True):
logger.debug(f"Downloading '{obj.object_name}'...")
yield self.get_object(obj.object_name)
def clear_bucket(self):
logger.debug(f"Clearing S3 bucket '{self.bucket}'...")
objects = self._client.list_objects(self.bucket, recursive=True)
for obj in objects:
self._client.remove_object(self.bucket, obj.object_name)
def get_all_object_names(self):
objs = self._client.list_objects(self.bucket, recursive=True)
return zip(repeat(self.bucket), map(attrgetter("object_name"), objs))
def get_s3_storage_from_settings(settings: Dynaconf):
validate_settings(settings, s3_storage_validators)
secure, endpoint = validate_and_parse_s3_endpoint(settings.storage.s3.endpoint)
return S3Storage(
client=Minio(
secure=secure,
endpoint=endpoint,
access_key=settings.storage.s3.key,
secret_key=settings.storage.s3.secret,
region=settings.storage.s3.region,
),
bucket=settings.storage.s3.bucket,
)
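
Example (not part of the diff): constructing an S3 storage from settings and doing a put/get round trip; endpoint and credentials are placeholders.

from dynaconf import Dynaconf

from pyinfra.storage.storages.s3 import get_s3_storage_from_settings

settings = Dynaconf()
settings.update({"storage": {"s3": {
    "endpoint": "http://localhost:9000",  # placeholder MinIO endpoint
    "key": "minio",
    "secret": "minio123",
    "region": "eu-central-1",
    "bucket": "research",
}}})

storage = get_s3_storage_from_settings(settings)
storage.put_object("dossier-1/file-1.json", b'{"hello": "world"}')
assert storage.exists("dossier-1/file-1.json")
data = storage.get_object("dossier-1/file-1.json")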

@ -0,0 +1,40 @@
from abc import ABC, abstractmethod
class Storage(ABC):
@property
@abstractmethod
def bucket(self):
raise NotImplementedError
@abstractmethod
def make_bucket(self):
raise NotImplementedError
@abstractmethod
def has_bucket(self):
raise NotImplementedError
@abstractmethod
def put_object(self, object_name, data):
raise NotImplementedError
@abstractmethod
def exists(self, object_name):
raise NotImplementedError
@abstractmethod
def get_object(self, object_name):
raise NotImplementedError
@abstractmethod
def get_all_objects(self):
raise NotImplementedError
@abstractmethod
def clear_bucket(self):
raise NotImplementedError
@abstractmethod
def get_all_object_names(self):
raise NotImplementedError
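
Example (not part of the diff): a hypothetical in-memory implementation of the Storage contract, e.g. as a test double.

from itertools import repeat

from pyinfra.storage.storages.storage import Storage

class InMemoryStorage(Storage):
    """Hypothetical test double that keeps objects in a dict."""

    def __init__(self, bucket: str):
        self._bucket = bucket
        self._objects: dict[str, bytes] = {}

    @property
    def bucket(self):
        return self._bucket

    def make_bucket(self):
        pass  # nothing to create in memory

    def has_bucket(self):
        return True

    def put_object(self, object_name, data):
        self._objects[object_name] = data

    def exists(self, object_name):
        return object_name in self._objects

    def get_object(self, object_name):
        return self._objects[object_name]

    def get_all_objects(self):
        yield from self._objects.values()

    def clear_bucket(self):
        self._objects.clear()

    def get_all_object_names(self):
        return zip(repeat(self.bucket), self._objects.keys())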

150
pyinfra/storage/utils.py Normal file
@ -0,0 +1,150 @@
import gzip
import json
from functools import singledispatch
from typing import TypedDict
from kn_utils.logging import logger
from pydantic import BaseModel, ValidationError
from pyinfra.storage.storages.storage import Storage
class DossierIdFileIdDownloadPayload(BaseModel):
dossierId: str
fileId: str
targetFileExtension: str
@property
def targetFilePath(self):
return f"{self.dossierId}/{self.fileId}.{self.targetFileExtension}"
class TenantIdDossierIdFileIdDownloadPayload(BaseModel):
tenantId: str
dossierId: str
fileId: str
targetFileExtension: str
@property
def targetFilePath(self):
return f"{self.tenantId}/{self.dossierId}/{self.fileId}.{self.targetFileExtension}"
class DossierIdFileIdUploadPayload(BaseModel):
dossierId: str
fileId: str
responseFileExtension: str
@property
def responseFilePath(self):
return f"{self.dossierId}/{self.fileId}.{self.responseFileExtension}"
class TenantIdDossierIdFileIdUploadPayload(BaseModel):
tenantId: str
dossierId: str
fileId: str
responseFileExtension: str
@property
def responseFilePath(self):
return f"{self.tenantId}/{self.dossierId}/{self.fileId}.{self.responseFileExtension}"
class TargetResponseFilePathDownloadPayload(BaseModel):
targetFilePath: str | dict[str, str]
class TargetResponseFilePathUploadPayload(BaseModel):
responseFilePath: str
class DownloadedData(TypedDict):
data: bytes
file_path: str
def download_data_bytes_as_specified_in_message(
storage: Storage, raw_payload: dict
) -> dict[str, DownloadedData] | DownloadedData:
"""Convenience function to download a file specified in a message payload.
Supports both legacy and new payload formats. Also supports downloading multiple files at once, which should
be specified as a dictionary under the 'targetFilePath' key, with file paths as values.
Each file is downloaded as bytes and returned as a DownloadedData dictionary holding the data and its file path.
In case of several download targets, a nested dictionary is returned that maps the keys of the 'targetFilePath'
dictionary to the corresponding DownloadedData dictionaries.
"""
try:
if "tenantId" in raw_payload and "dossierId" in raw_payload:
payload = TenantIdDossierIdFileIdDownloadPayload(**raw_payload)
elif "tenantId" not in raw_payload and "dossierId" in raw_payload:
payload = DossierIdFileIdDownloadPayload(**raw_payload)
else:
payload = TargetResponseFilePathDownloadPayload(**raw_payload)
except ValidationError:
raise ValueError("No download file path found in payload, nothing to download.")
data = _download(payload.targetFilePath, storage)
return data
@singledispatch
def _download(
file_path_or_file_path_dict: str | dict[str, str], storage: Storage
) -> dict[str, DownloadedData] | DownloadedData:
pass
@_download.register(str)
def _download_single_file(file_path: str, storage: Storage) -> DownloadedData:
if not storage.exists(file_path):
raise FileNotFoundError(f"File '{file_path}' does not exist in storage.")
data = storage.get_object(file_path)
logger.info(f"Downloaded {file_path} from storage.")
return DownloadedData(data=data, file_path=file_path)
@_download.register(dict)
def _download_multiple_files(file_path_dict: dict, storage: Storage) -> dict[str, DownloadedData]:
return {key: _download(value, storage) for key, value in file_path_dict.items()}
def upload_data_as_specified_in_message(storage: Storage, raw_payload: dict, data):
"""Convenience function to upload a file specified in a message payload. For now, only json serializable data is
supported. The storage json consists of the raw_payload, which is extended with a 'data' key, containing the
data to be uploaded.
If the content is not a json serializable object, an exception will be raised.
If the result file identifier specifies compression with gzip (.gz), it will be compressed before upload.
This function can be extended as needed in the future (e.g. to upload images). Since further requirements are not
yet specified and it is unclear what they would entail, the code is kept simple for now to preserve readability and
maintainability and to avoid refactoring generic solutions that turn out not to be generic after all.
"""
try:
if "tenantId" in raw_payload and "dossierId" in raw_payload:
payload = TenantIdDossierIdFileIdUploadPayload(**raw_payload)
elif "tenantId" not in raw_payload and "dossierId" in raw_payload:
payload = DossierIdFileIdUploadPayload(**raw_payload)
else:
payload = TargetResponseFilePathUploadPayload(**raw_payload)
except ValidationError:
raise ValueError("No upload file path found in payload, nothing to upload.")
if ".json" not in payload.responseFilePath:
raise ValueError("Only json serializable data can be uploaded.")
data = {**raw_payload, "data": data}
data = json.dumps(data).encode("utf-8")
data = gzip.compress(data) if ".gz" in payload.responseFilePath else data
storage.put_object(payload.responseFilePath, data)
logger.info(f"Uploaded {payload.responseFilePath} to storage.")


@ -1,21 +0,0 @@
import logging
from pyinfra.locations import BANNER_FILE
def show_banner():
with open(BANNER_FILE) as f:
banner = "\n" + "".join(f.readlines()) + "\n"
logger = logging.getLogger(__name__)
logger.propagate = False
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info(banner)

pyinfra/utils/cipher.py

@ -0,0 +1,49 @@
import base64
import os
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def build_aes_gcm_cipher(public_key, iv=None):
encoded_key = public_key.encode("utf-8")
kdf = PBKDF2HMAC(
algorithm=hashes.SHA1(),
length=16,
salt=iv,
iterations=65536,
)
private_key = kdf.derive(encoded_key)
return AESGCM(private_key)
def encrypt(public_key: str, plaintext: str, iv: bytes = None) -> str:
"""Encrypt a text with AES/GCM using a public key.
The base64-decoded ciphertext starts with a 4-byte header (an unsigned 32-bit big-endian integer) that specifies
the length of the following initialization vector (iv); the remainder is the iv followed by the encrypted message.
"""
iv = iv or os.urandom(12)
plaintext_bytes = plaintext.encode("utf-8")
cipher = build_aes_gcm_cipher(public_key, iv)
header = len(iv).to_bytes(length=4, byteorder="big")
encrypted = header + iv + cipher.encrypt(nonce=iv, data=plaintext_bytes, associated_data=None)
return base64.b64encode(encrypted).decode("utf-8")
def decrypt(public_key: str, ciphertext: str) -> str:
"""Decrypt an AES/GCS encrypted text with a public key.
The byte-converted ciphertext consists of an unsigned 32-bit integer big-endian byteorder header i.e. the first 4
bytes, specifying the length of the following initialization vector (iv). The rest of the text contains the
encrypted message.
"""
ciphertext_bytes = base64.b64decode(ciphertext)
header, rest = ciphertext_bytes[:4], ciphertext_bytes[4:]
iv_length = int.from_bytes(header, "big")
iv, ciphertext_bytes = rest[:iv_length], rest[iv_length:]
cipher = build_aes_gcm_cipher(public_key, iv)
decrypted_text = cipher.decrypt(nonce=iv, data=ciphertext_bytes, associated_data=None)
return decrypted_text.decode("utf-8")
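
A round-trip sketch for the cipher helpers (editorial, not part of the changeset); despite the parameter name, 'public_key' is effectively a shared passphrase that PBKDF2 stretches into the AES key.

from pyinfra.utils.cipher import decrypt, encrypt

token = encrypt("shared-secret", "hello pyinfra")  # a fresh 12-byte iv is generated when none is passed
assert decrypt("shared-secret", token) == "hello pyinfra"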


@ -0,0 +1,96 @@
import json
from azure.monitor.opentelemetry import configure_azure_monitor
from dynaconf import Dynaconf
from fastapi import FastAPI
from kn_utils.logging import logger
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.aio_pika import AioPikaInstrumentor
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from opentelemetry.instrumentation.pika import PikaInstrumentor
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
BatchSpanProcessor,
ConsoleSpanExporter,
SpanExporter,
SpanExportResult,
)
from pyinfra.config.loader import validate_settings
from pyinfra.config.validators import opentelemetry_validators
class JsonSpanExporter(SpanExporter):
def __init__(self):
self.traces = []
def export(self, spans):
for span in spans:
self.traces.append(json.loads(span.to_json()))
return SpanExportResult.SUCCESS
def shutdown(self):
pass
def setup_trace(settings: Dynaconf, service_name: str = None, exporter: SpanExporter = None):
tracing_type = settings.tracing.type
if tracing_type == "azure_monitor":
# Configure OpenTelemetry to use Azure Monitor with the
# APPLICATIONINSIGHTS_CONNECTION_STRING environment variable.
try:
configure_azure_monitor()
logger.info("Azure Monitor tracing enabled.")
except Exception as exception:
logger.warning(f"Azure Monitor tracing could not be enabled: {exception}")
elif tracing_type == "opentelemetry":
configure_opentelemtry_tracing(settings, service_name, exporter)
logger.info("OpenTelemetry tracing enabled.")
else:
logger.warning(f"Unknown tracing type: {tracing_type}. Tracing could not be enabled.")
def configure_opentelemtry_tracing(settings: Dynaconf, service_name: str = None, exporter: SpanExporter = None):
service_name = service_name or settings.tracing.opentelemetry.service_name
exporter = exporter or get_exporter(settings)
resource = Resource(attributes={"service.name": service_name})
provider = TracerProvider(resource=resource, shutdown_on_exit=True)
processor = BatchSpanProcessor(exporter)
provider.add_span_processor(processor)
# TODO: trace.set_tracer_provider produces a warning if trying to set the provider twice.
# "WARNING opentelemetry.trace:__init__.py:521 Overriding of current TracerProvider is not allowed"
# This doesn't seem to affect the functionality since we only want to use the tracer provider set at the beginning.
# We work around the log message by using the protected method with log=False.
trace._set_tracer_provider(provider, log=False)
def get_exporter(settings: Dynaconf):
validate_settings(settings, validators=opentelemetry_validators)
if settings.tracing.opentelemetry.exporter == "json":
return JsonSpanExporter()
elif settings.tracing.opentelemetry.exporter == "otlp":
return OTLPSpanExporter(endpoint=settings.tracing.opentelemetry.endpoint)
elif settings.tracing.opentelemetry.exporter == "console":
return ConsoleSpanExporter()
else:
raise ValueError(
f"Invalid OpenTelemetry exporter {settings.tracing.opentelemetry.exporter}. "
f"Valid values are 'json', 'otlp' and 'console'."
)
def instrument_pika(dynamic_queues: bool):
if dynamic_queues:
AioPikaInstrumentor().instrument()
else:
PikaInstrumentor().instrument()
def instrument_app(app: FastAPI, excluded_urls: str = "/health,/ready,/prometheus"):
FastAPIInstrumentor().instrument_app(app, excluded_urls=excluded_urls)
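
A wiring sketch for the tracing helpers (editorial, not part of the changeset). It assumes the settings loaded from the packaged config/ directory define tracing.type plus the tracing.opentelemetry.* keys checked by opentelemetry_validators; the module path matches the import used in the tests later in this changeset.

from fastapi import FastAPI
from pyinfra.config.loader import load_settings, local_pyinfra_root_path
from pyinfra.utils.opentelemetry import instrument_app, instrument_pika, setup_trace

settings = load_settings(local_pyinfra_root_path / "config/")
setup_trace(settings, service_name="my-service")  # exporter is picked from settings: 'json', 'otlp' or 'console'
instrument_pika(dynamic_queues=False)  # False instruments blocking pika, True instruments aio-pika
app = FastAPI()
instrument_app(app)  # /health, /ready and /prometheus are excluded from tracing by default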


@ -0,0 +1,40 @@
import re
from operator import truth
from typing import Tuple
from urllib.parse import urlparse
def make_url_validator(allowed_connection_schemes: tuple = ("http", "https")):
pattern = re.compile(
r"^(("
+ r"([A-Za-z]{3,9}:(?:\/\/)?)"
+ r"(?:[\-;:&=\+\$,\w]+@)?"
+ r"[A-Za-z0-9\.\-]+|(?:www\.|[\-;:&=\+\$,\w]+@)"
+ r"[A-Za-z0-9\.\-]+)"
+ r"((?:\/[\+~%\/\.\w\-_]*)?"
+ r"\??(?:[\-\+=&;%@\.\w_]*)#?(?:[\.\!\/\\\w]*))?)"
)
def inner(url: str):
url_is_valid = pattern.match(url)
parsed_url = urlparse(url)
endpoint_is_valid = truth(parsed_url.netloc)
protocol_is_valid = parsed_url.scheme in allowed_connection_schemes
return url_is_valid and endpoint_is_valid and protocol_is_valid
return inner
def validate_and_parse_s3_endpoint(endpoint: str) -> Tuple[bool, str]:
validate_url = make_url_validator()
if not validate_url(endpoint):
raise Exception(f"The s3 storage endpoint is not a valid url: {endpoint}")
parsed_url = urlparse(endpoint)
connection_is_secure = parsed_url.scheme == "https"
storage_endpoint = parsed_url.netloc
return connection_is_secure, storage_endpoint
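
A short sketch of the URL validation helpers above (editorial, not part of the changeset). The file's module path is not visible in this diff, so the import is omitted; the endpoint is hypothetical.

validate_url = make_url_validator(allowed_connection_schemes=("http", "https"))
assert validate_url("https://minio.example.com:9000")
is_secure, endpoint = validate_and_parse_s3_endpoint("https://minio.example.com:9000")
# is_secure -> True, endpoint -> "minio.example.com:9000" (scheme stripped, as the S3 client expects)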


@ -1,91 +0,0 @@
import abc
import gzip
import json
import logging
from operator import itemgetter
from typing import Callable
from pyinfra.config import CONFIG, parse_disjunction_string
from pyinfra.exceptions import DataLoadingFailure
from pyinfra.storage.storage import Storage
def get_object_name(body):
dossier_id, file_id, target_file_extension = itemgetter("dossierId", "fileId", "targetFileExtension")(body)
object_name = f"{dossier_id}/{file_id}.{target_file_extension}"
return object_name
def get_response_object_name(body):
dossier_id, file_id, response_file_extension = itemgetter("dossierId", "fileId", "responseFileExtension")(body)
object_name = f"{dossier_id}/{file_id}.{response_file_extension}"
return object_name
def get_object_descriptor(body):
return {"bucket_name": parse_disjunction_string(CONFIG.storage.bucket), "object_name": get_object_name(body)}
def get_response_object_descriptor(body):
return {
"bucket_name": parse_disjunction_string(CONFIG.storage.bucket),
"object_name": get_response_object_name(body),
}
class ResponseStrategy(abc.ABC):
@abc.abstractmethod
def handle_response(self, body):
pass
def __call__(self, body):
return self.handle_response(body)
class StorageStrategy(ResponseStrategy):
def __init__(self, storage):
self.storage = storage
def handle_response(self, body):
self.storage.put_object(**get_response_object_descriptor(body), data=gzip.compress(json.dumps(body).encode()))
body.pop("data")
return body
class ForwardingStrategy(ResponseStrategy):
def handle_response(self, body):
return body
class QueueVisitor:
def __init__(self, storage: Storage, callback: Callable, response_strategy):
self.storage = storage
self.callback = callback
self.response_strategy = response_strategy
def load_data(self, body):
def download():
logging.debug(f"Downloading {object_descriptor}...")
data = self.storage.get_object(**object_descriptor)
logging.debug(f"Downloaded {object_descriptor}.")
return data
object_descriptor = get_object_descriptor(body)
try:
return gzip.decompress(download())
except Exception as err:
logging.warning(f"Loading data from storage failed for {object_descriptor}.")
raise DataLoadingFailure from err
def process_data(self, data, body):
return self.callback({**body, "data": data})
def load_and_process(self, body):
data = self.process_data(self.load_data(body), body)
result_body = {**body, "data": data}
return result_body
def __call__(self, body):
result_body = self.load_and_process(body)
return self.response_strategy(result_body)


@ -0,0 +1,64 @@
from time import time
from typing import Callable, TypeVar
from dynaconf import Dynaconf
from fastapi import FastAPI
from funcy import identity
from prometheus_client import REGISTRY, CollectorRegistry, Summary, generate_latest
from starlette.responses import Response
from pyinfra.config.loader import validate_settings
from pyinfra.config.validators import prometheus_validators
def add_prometheus_endpoint(app: FastAPI, registry: CollectorRegistry = REGISTRY) -> FastAPI:
"""Add a prometheus endpoint to the app. It is recommended to use the default global registry.
You can register your own metrics with it anywhere, and they will be scraped with this endpoint.
See https://prometheus.io/docs/concepts/metric_types/ for the different metric types.
The implementation for monitoring the processing time of a function is in the decorator below (decorate the
processing function of a service to assess the processing time of each call).
The convention for the metric name is {product_name}_{service_name}_{parameter_to_monitor}.
"""
@app.get("/prometheus")
def prometheus_metrics():
return Response(generate_latest(registry), media_type="text/plain")
return app
Decorator = TypeVar("Decorator", bound=Callable[[Callable], Callable])
def make_prometheus_processing_time_decorator_from_settings(
settings: Dynaconf,
postfix: str = "processing_time",
registry: CollectorRegistry = REGISTRY,
) -> Decorator:
"""Make a decorator for monitoring the processing time of a function. This, and other metrics should follow the
convention {product name}_{service name}_{processing step / parameter to monitor}.
"""
validate_settings(settings, validators=prometheus_validators)
processing_time_sum = Summary(
f"{settings.metrics.prometheus.prefix}_{postfix}",
"Summed up processing time per call.",
registry=registry,
)
def decorator(process_fn: Callable) -> Callable:
def inner(*args, **kwargs):
start = time()
result = process_fn(*args, **kwargs)
runtime = time() - start
processing_time_sum.observe(runtime)
return result
return inner
return decorator
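
A usage sketch for the Prometheus helpers (editorial, not part of the changeset). It assumes the loaded settings define metrics.prometheus.prefix as required by prometheus_validators; the decorated function and the sleep are illustrative.

from time import sleep

from fastapi import FastAPI
from pyinfra.config.loader import load_settings, local_pyinfra_root_path
from pyinfra.webserver.prometheus import (
    add_prometheus_endpoint,
    make_prometheus_processing_time_decorator_from_settings,
)

settings = load_settings(local_pyinfra_root_path / "config/")
app = add_prometheus_endpoint(FastAPI())  # exposes GET /prometheus backed by the default global registry

@make_prometheus_processing_time_decorator_from_settings(settings)
def process(document: bytes) -> dict:
    sleep(0.1)  # stand-in for real work
    return {"ok": True}

process(b"payload")  # adds one observation to the {prefix}_processing_time summary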

pyinfra/webserver/utils.py

@ -0,0 +1,103 @@
import asyncio
import inspect
import logging
import signal
import threading
import time
from typing import Callable
import uvicorn
from dynaconf import Dynaconf
from fastapi import FastAPI
from kn_utils.logging import logger
from kn_utils.retry import retry
from pyinfra.config.loader import validate_settings
from pyinfra.config.validators import webserver_validators
class PyInfraUvicornServer(uvicorn.Server):
# this is a workaround to enable custom signal handlers
# https://github.com/encode/uvicorn/issues/1579
def install_signal_handlers(self):
pass
@retry(
tries=5,
exceptions=Exception,
reraise=True,
)
def create_webserver_thread_from_settings(app: FastAPI, settings: Dynaconf) -> threading.Thread:
validate_settings(settings, validators=webserver_validators)
return create_webserver_thread(app=app, port=settings.webserver.port, host=settings.webserver.host)
def create_webserver_thread(app: FastAPI, port: int, host: str) -> threading.Thread:
"""Creates a thread that runs a FastAPI webserver. Start with thread.start(), and join with thread.join().
Note that the thread is a daemon thread, so it will be terminated when the main thread is terminated.
"""
def run_server():
retries = 5
for attempt in range(retries):
try:
uvicorn.run(app, port=port, host=host, log_level=logging.WARNING)
break
except Exception as e:
if attempt < retries - 1: # if it's not the last attempt
logger.warning(f"Attempt {attempt + 1} failed to start the server: {e}. Retrying...")
time.sleep(2**attempt) # exponential backoff
else:
logger.error(f"Failed to start the server after {retries} attempts: {e}")
raise
thread = threading.Thread(target=run_server)
thread.daemon = True
return thread
async def run_async_webserver(app: FastAPI, port: int, host: str):
"""Run the FastAPI web server async."""
config = uvicorn.Config(app, host=host, port=port, log_level=logging.WARNING)
server = PyInfraUvicornServer(config)
try:
await server.serve()
except asyncio.CancelledError:
logger.debug("Webserver was cancelled.")
server.should_exit = True
await server.shutdown()
except Exception as e:
logger.error(f"Error while running the webserver: {e}", exc_info=True)
finally:
logger.info("Webserver has been shut down.")
HealthFunction = Callable[[], bool]
def add_health_check_endpoint(app: FastAPI, health_function: HealthFunction) -> FastAPI:
"""Add a health check endpoint to the app. The health function should return True if the service is healthy,
and False otherwise. The health function is called when the endpoint is hit.
"""
if inspect.iscoroutinefunction(health_function):
@app.get("/health")
@app.get("/ready")
async def async_check_health():
alive = await health_function()
if alive:
return {"status": "OK"}, 200
return {"status": "Service Unavailable"}, 503
else:
@app.get("/health")
@app.get("/ready")
def check_health():
if health_function():
return {"status": "OK"}, 200
return {"status": "Service Unavailable"}, 503
return app
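
A minimal startup sketch for the webserver helpers above (editorial, not part of the changeset); the port and host values are hypothetical.

from fastapi import FastAPI
from pyinfra.webserver.utils import add_health_check_endpoint, create_webserver_thread

app = add_health_check_endpoint(FastAPI(), health_function=lambda: True)
thread = create_webserver_thread(app, port=8080, host="127.0.0.1")
thread.start()  # daemon thread, terminates together with the main thread
# async services can instead await run_async_webserver(app, port=8080, host="127.0.0.1")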

pyproject.toml

@ -0,0 +1,103 @@
[tool.poetry]
name = "pyinfra"
version = "4.1.0"
description = ""
authors = ["Team Research <research@knecon.com>"]
license = "All rights reseverd"
[tool.poetry.dependencies]
python = ">=3.10,<3.11"
# infra, deployment
pika = "^1.3"
retry = "^0.9"
minio = "^7.1"
prometheus-client = "^0.18"
# azure
azure-core = "^1.29"
azure-storage-blob = "^12.13"
# misc utils
funcy = "^2"
pycryptodome = "^3.19"
fastapi = "^0.109.0"
uvicorn = "^0.26.0"
# DON'T USE GROUPS BECAUSE THEY ARE NOT INSTALLED FOR PACKAGES
# [tool.poetry.group.internal.dependencies] <<< THIS IS NOT WORKING
kn-utils = { version = ">=0.4.0", source = "nexus" }
# We only set a lower bound for all opentelemetry dependencies because the image classification service depends on a protobuf version <4, but does not use proto files.
# Therefore, we allow the latest possible protobuf version in the services that use proto files. As soon as the dependency issue is fixed, set this to the latest possible opentelemetry version.
opentelemetry-instrumentation-pika = ">=0.46b0,<0.50"
opentelemetry-exporter-otlp = ">=1.25.0,<1.29"
opentelemetry-instrumentation = ">=0.46b0,<0.50"
opentelemetry-api = ">=1.25.0,<1.29"
opentelemetry-sdk = ">=1.25.0,<1.29"
opentelemetry-exporter-otlp-proto-http = ">=1.25.0,<1.29"
opentelemetry-instrumentation-flask = ">=0.46b0,<0.50"
opentelemetry-instrumentation-requests = ">=0.46b0,<0.50"
opentelemetry-instrumentation-fastapi = ">=0.46b0,<0.50"
opentelemetry-instrumentation-aio-pika = ">=0.46b0,<0.50"
wcwidth = "<=0.2.12"
azure-monitor-opentelemetry = "^1.6.0"
aio-pika = "^9.4.2"
aiohttp = "^3.9.5"
# THIS IS NOT AVAILABLE FOR SERVICES THAT IMPLEMENT PYINFRA
[tool.poetry.group.dev.dependencies]
pytest = "^7"
ipykernel = "^6.26.0"
black = "^24.10"
pylint = "^3"
coverage = "^7.3"
requests = "^2.31"
pre-commit = "^3.6.0"
cyclonedx-bom = "^4.1.1"
dvc = "^3.51.2"
dvc-azure = "^3.1.0"
deepdiff = "^7.0.1"
pytest-cov = "^5.0.0"
[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-ra -q"
testpaths = ["tests", "integration"]
log_cli = 1
log_cli_level = "DEBUG"
[tool.mypy]
exclude = ['.venv']
[tool.black]
line-length = 120
target-version = ["py310"]
[tool.isort]
profile = "black"
[tool.pylint.format]
max-line-length = 120
disable = [
"C0114",
"C0325",
"R0801",
"R0902",
"R0903",
"R0904",
"R0913",
"R0914",
"W0511",
]
docstring-min-length = 3
[[tool.poetry.source]]
name = "pypi-proxy"
url = "https://nexus.knecon.com/repository/pypi-proxy/simple"
priority = "primary"
[[tool.poetry.source]]
name = "nexus"
url = "https://nexus.knecon.com/repository/python/simple"
priority = "explicit"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"


@ -1,4 +0,0 @@
[pytest]
log_cli = 1
log_cli_level = DEBUG


@ -1,15 +0,0 @@
pika==1.2.0
retry==0.9.2
envyaml==1.10.211231
minio==7.1.3
Flask==2.1.1
waitress==2.0.0
azure-core==1.22.1
azure-storage-blob==12.9.0
requests==2.27.1
testcontainers==3.4.2
docker-compose==1.29.2
tqdm==4.62.3
pytest~=7.0.1
funcy==1.17
fpdf==1.7.2


@ -1,9 +0,0 @@
echo "${bamboo_nexus_password}" | docker login --username "${bamboo_nexus_user}" --password-stdin nexus.iqser.com:5001
docker build -f Dockerfile_tests -t pyinfra-tests .
rnd=$(date +"%s")
name=pyinfra-tests-${rnd}
echo "running tests container"
docker run --rm --net=host --name $name -v $PWD:$PWD -w $PWD -v /var/run/docker.sock:/var/run/docker.sock pyinfra-tests


@ -1,72 +0,0 @@
import argparse
import gzip
import os
from pathlib import Path
from tqdm import tqdm
from pyinfra.config import CONFIG, parse_disjunction_string
from pyinfra.storage.storages import get_s3_storage
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help="sub-command help", dest="command")
parser_add = subparsers.add_parser("add", help="Add file(s) to the MinIO store")
parser_add.add_argument("dossier_id")
add_group = parser_add.add_mutually_exclusive_group(required=True)
add_group.add_argument("--file", "-f")
add_group.add_argument("--directory", "-d")
subparsers.add_parser("purge", help="Delete all files and buckets in the MinIO store")
args = parser.parse_args()
return args
def combine_dossier_id_and_file_id_and_extension(dossier_id, file_id, extension):
return f"{dossier_id}/{file_id}{extension}"
def upload_compressed_response(storage, bucket_name, dossier_id, file_id, result) -> None:
data = gzip.compress(result.encode())
path_gz = combine_dossier_id_and_file_id_and_extension(dossier_id, file_id, CONFIG.service.response.extension)
storage.put_object(bucket_name, path_gz, data)
def add_file_compressed(storage, bucket_name, dossier_id, path) -> None:
if Path(path).suffix == ".pdf":
suffix_gz = ".ORIGIN.pdf.gz"
if Path(path).suffix == ".json":
suffix_gz = ".TEXT.json.gz"
path_gz = combine_dossier_id_and_file_id_and_extension(dossier_id, Path(path).stem, suffix_gz)
with open(path, "rb") as f:
data = gzip.compress(f.read())
storage.put_object(bucket_name, path_gz, data)
if __name__ == "__main__":
storage = get_s3_storage()
bucket_name = parse_disjunction_string(CONFIG.storage.bucket)
if not storage.has_bucket(bucket_name):
storage.make_bucket(bucket_name)
args = parse_args()
if args.command == "add":
if args.file:
add_file_compressed(storage, bucket_name, args.dossier_id, args.file)
elif args.directory:
for fname in tqdm([*os.listdir(args.directory)], desc="Adding files"):
path = Path(args.directory) / fname
add_file_compressed(storage, bucket_name, args.dossier_id, path)
elif args.command == "purge":
storage.clear_bucket(bucket_name)


@ -1,88 +0,0 @@
import argparse
import json
import pika
from pyinfra.config import CONFIG, parse_disjunction_string
from pyinfra.storage.storages import get_s3_storage
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--bucket_name", "-b", required=True)
parser.add_argument("--analysis_container", "-a", choices=["detr", "ner", "image", "dl_error"], required=True)
args = parser.parse_args()
return args
def read_connection_params():
credentials = pika.PlainCredentials(CONFIG.rabbitmq.user, CONFIG.rabbitmq.password)
parameters = pika.ConnectionParameters(
host=CONFIG.rabbitmq.host,
port=CONFIG.rabbitmq.port,
heartbeat=CONFIG.rabbitmq.heartbeat,
credentials=credentials,
)
return parameters
def make_channel(connection) -> pika.adapters.blocking_connection.BlockingChannel:
channel = connection.channel()
channel.basic_qos(prefetch_count=1)
return channel
def declare_queue(channel, queue: str):
args = {"x-dead-letter-exchange": "", "x-dead-letter-routing-key": CONFIG.rabbitmq.queues.dead_letter}
return channel.queue_declare(queue=queue, auto_delete=False, durable=True, arguments=args)
def make_connection() -> pika.BlockingConnection:
parameters = read_connection_params()
connection = pika.BlockingConnection(parameters)
return connection
def build_message_bodies(analyse_container_type, bucket_name):
def update_message(message_dict):
if analyse_container_type == "detr" or analyse_container_type == "image":
message_dict.update({"targetFileExtension": "ORIGIN.pdf.gz", "responseFileExtension": "IMAGE_INFO.json.gz"})
if analyse_container_type == "dl_error":
message_dict.update({"targetFileExtension": "no_such_file", "responseFileExtension": "IMAGE_INFO.json.gz"})
if analyse_container_type == "ner":
message_dict.update(
{"targetFileExtension": "TEXT.json.gz", "responseFileExtension": "NER_ENTITIES.json.gz"}
)
return message_dict
storage = get_s3_storage()
for bucket_name, pdf_name in storage.get_all_object_names(bucket_name):
if "pdf" not in pdf_name:
continue
file_id = pdf_name.split(".")[0]
dossier_id, file_id = file_id.split("/")
message_dict = {"dossierId": dossier_id, "fileId": file_id}
update_message(message_dict)
yield json.dumps(message_dict).encode()
def main(args):
connection = make_connection()
channel = make_channel(connection)
declare_queue(channel, CONFIG.rabbitmq.queues.input)
declare_queue(channel, CONFIG.rabbitmq.queues.output)
for body in build_message_bodies(args.analysis_container, args.bucket_name):
channel.basic_publish("", CONFIG.rabbitmq.queues.input, body)
print(f"Put {body} on {CONFIG.rabbitmq.queues.input}")
for method_frame, _, body in channel.consume(queue=CONFIG.rabbitmq.queues.output, inactivity_timeout=1):
if not body:
break
print(f"Received {json.loads(body)}")
channel.basic_ack(method_frame.delivery_tag)
channel.close()
if __name__ == "__main__":
main(parse_args())


@ -0,0 +1,150 @@
import asyncio
import gzip
import json
from operator import itemgetter
from typing import Any, Dict
from aio_pika import Message
from aio_pika.abc import AbstractIncomingMessage
from kn_utils.logging import logger
from pyinfra.config.loader import load_settings, local_pyinfra_root_path
from pyinfra.queue.async_manager import AsyncQueueManager, RabbitMQConfig
from pyinfra.storage.storages.s3 import S3Storage, get_s3_storage_from_settings
settings = load_settings(local_pyinfra_root_path / "config/")
async def dummy_message_processor(message: Dict[str, Any]) -> Dict[str, Any]:
logger.info(f"Processing message: {message}")
# await asyncio.sleep(1) # Simulate processing time
storage = get_s3_storage_from_settings(settings)
tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(message)
suffix = message["responseFileExtension"]
object_name = f"{tenant_id}/{dossier_id}/{file_id}.{message['targetFileExtension']}"
original_content = json.loads(gzip.decompress(storage.get_object(object_name)))
processed_content = {
"processedPages": original_content["numberOfPages"],
"processedSectionTexts": f"Processed: {original_content['sectionTexts']}",
}
processed_object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}"
processed_data = gzip.compress(json.dumps(processed_content).encode("utf-8"))
storage.put_object(processed_object_name, processed_data)
processed_message = message.copy()
processed_message["processed"] = True
processed_message["processor_message"] = "This message was processed by the dummy processor"
logger.info(f"Finished processing message. Result: {processed_message}")
return processed_message
async def on_response_message_callback(storage: S3Storage):
async def on_message(message: AbstractIncomingMessage) -> None:
async with message.process(ignore_processed=True):
if not message.body:
raise ValueError
response = json.loads(message.body)
logger.info(f"Received {response}")
logger.info(f"Message headers: {message.properties.headers}")
await message.ack()
tenant_id, dossier_id, file_id = itemgetter("tenantId", "dossierId", "fileId")(response)
suffix = response["responseFileExtension"]
result = storage.get_object(f"{tenant_id}/{dossier_id}/{file_id}.{suffix}")
result = json.loads(gzip.decompress(result))
logger.info(f"Contents of result on storage: {result}")
return on_message
def upload_json_and_make_message_body(tenant_id: str):
dossier_id, file_id, suffix = "dossier", "file", "json.gz"
content = {
"numberOfPages": 7,
"sectionTexts": "data",
}
object_name = f"{tenant_id}/{dossier_id}/{file_id}.{suffix}"
data = gzip.compress(json.dumps(content).encode("utf-8"))
storage = get_s3_storage_from_settings(settings)
if not storage.has_bucket():
storage.make_bucket()
storage.put_object(object_name, data)
message_body = {
"tenantId": tenant_id,
"dossierId": dossier_id,
"fileId": file_id,
"targetFileExtension": suffix,
"responseFileExtension": f"result.{suffix}",
}
return message_body, storage
async def test_rabbitmq_handler() -> None:
tenant_service_url = settings.storage.tenant_server.endpoint
config = RabbitMQConfig(
host=settings.rabbitmq.host,
port=settings.rabbitmq.port,
username=settings.rabbitmq.username,
password=settings.rabbitmq.password,
heartbeat=settings.rabbitmq.heartbeat,
input_queue_prefix=settings.rabbitmq.service_request_queue_prefix,
tenant_event_queue_suffix=settings.rabbitmq.tenant_event_queue_suffix,
tenant_exchange_name=settings.rabbitmq.tenant_exchange_name,
service_request_exchange_name=settings.rabbitmq.service_request_exchange_name,
service_response_exchange_name=settings.rabbitmq.service_response_exchange_name,
service_dead_letter_queue_name=settings.rabbitmq.service_dlq_name,
queue_expiration_time=settings.rabbitmq.queue_expiration_time,
pod_name=settings.kubernetes.pod_name,
)
handler = AsyncQueueManager(config, tenant_service_url, dummy_message_processor)
await handler.connect()
await handler.setup_exchanges()
tenant_id = "test_tenant"
# Test tenant creation
create_message = {"tenantId": tenant_id}
await handler.tenant_exchange.publish(
Message(body=json.dumps(create_message).encode()), routing_key="tenant.created"
)
logger.info(f"Sent create tenant message for {tenant_id}")
await asyncio.sleep(0.5) # Wait for queue creation
# Prepare service request
service_request, storage = upload_json_and_make_message_body(tenant_id)
# Test service request
await handler.input_exchange.publish(Message(body=json.dumps(service_request).encode()), routing_key=tenant_id)
logger.info(f"Sent service request for {tenant_id}")
await asyncio.sleep(5) # Wait for message processing
# Consume service request
response_queue = await handler.channel.declare_queue(name=f"response_queue_{tenant_id}")
await response_queue.bind(exchange=handler.output_exchange, routing_key=tenant_id)
callback = await on_response_message_callback(storage)
await response_queue.consume(callback=callback)
await asyncio.sleep(5) # Wait for message processing
# Test tenant deletion
delete_message = {"tenantId": tenant_id}
await handler.tenant_exchange.publish(
Message(body=json.dumps(delete_message).encode()), routing_key="tenant.delete"
)
logger.info(f"Sent delete tenant message for {tenant_id}")
await asyncio.sleep(0.5) # Wait for queue deletion
await handler.connection.close()
if __name__ == "__main__":
asyncio.run(test_rabbitmq_handler())

scripts/send_request.py

@ -0,0 +1,67 @@
import gzip
import json
from operator import itemgetter
from kn_utils.logging import logger
from pyinfra.config.loader import load_settings, local_pyinfra_root_path
from pyinfra.queue.manager import QueueManager
from pyinfra.storage.storages.s3 import get_s3_storage_from_settings
settings = load_settings(local_pyinfra_root_path / "config/")
def upload_json_and_make_message_body():
dossier_id, file_id, suffix = "dossier", "file", "json.gz"
content = {
"numberOfPages": 7,
"sectionTexts": "data",
}
object_name = f"{dossier_id}/{file_id}.{suffix}"
data = gzip.compress(json.dumps(content).encode("utf-8"))
storage = get_s3_storage_from_settings(settings)
if not storage.has_bucket():
storage.make_bucket()
storage.put_object(object_name, data)
message_body = {
"dossierId": dossier_id,
"fileId": file_id,
"targetFileExtension": suffix,
"responseFileExtension": f"result.{suffix}",
}
return message_body
def main():
queue_manager = QueueManager(settings)
queue_manager.purge_queues()
message = upload_json_and_make_message_body()
queue_manager.publish_message_to_input_queue(message)
logger.info(f"Put {message} on {settings.rabbitmq.input_queue}.")
storage = get_s3_storage_from_settings(settings)
for method_frame, properties, body in queue_manager.channel.consume(
queue=settings.rabbitmq.output_queue, inactivity_timeout=15
):
if not body:
break
response = json.loads(body)
logger.info(f"Received {response}")
logger.info(f"Message headers: {properties.headers}")
queue_manager.channel.basic_ack(method_frame.delivery_tag)
dossier_id, file_id = itemgetter("dossierId", "fileId")(response)
suffix = message["responseFileExtension"]
print(f"{dossier_id}/{file_id}.{suffix}")
result = storage.get_object(f"{dossier_id}/{file_id}.{suffix}")
result = json.loads(gzip.decompress(result))
logger.info(f"Contents of result on storage: {result}")
queue_manager.stop_consuming()
if __name__ == "__main__":
main()

scripts/send_sigterm.py

@ -0,0 +1,17 @@
import os
import signal
import time
# BE CAREFUL WITH THIS SCRIPT - THIS SIMULATES A SIGTERM FROM KUBERNETES
target_pid = int(input("Enter the PID of the target script: "))
print(f"Sending SIGTERM to PID {target_pid}...")
time.sleep(1)
try:
os.kill(target_pid, signal.SIGTERM)
print("SIGTERM sent.")
except ProcessLookupError:
print("Process not found.")
except PermissionError:
print("Permission denied. Are you trying to signal a process you don't own?")


@ -0,0 +1,39 @@
#!/bin/bash
python_version=$1
nexus_user=$2
nexus_password=$3
# cookiecutter https://gitlab.knecon.com/knecon/research/template-python-project.git --checkout master
# latest_dir=$(ls -td -- */ | head -n 1) # should be the dir cookiecutter just created
# cd $latest_dir
pyenv install $python_version
pyenv local $python_version
pyenv shell $python_version
# install poetry globally (PREFERRED), only need to install it once
# curl -sSL https://install.python-poetry.org | python3 -
# remember to update poetry once in a while
poetry self update
# install poetry in current python environment, can lead to multiple instances of poetry being installed on one system (DISPREFERRED)
# pip install --upgrade pip
# pip install poetry
poetry config virtualenvs.in-project true
poetry config installer.max-workers 10
poetry config repositories.pypi-proxy "https://nexus.knecon.com/repository/pypi-proxy/simple"
poetry config http-basic.pypi-proxy ${nexus_user} ${nexus_password}
poetry config repositories.nexus https://nexus.knecon.com/repository/python/simple
poetry config http-basic.nexus ${nexus_user} ${nexus_password}
poetry env use $(pyenv which python)
poetry install --with=dev
poetry update
source .venv/bin/activate
pre-commit install
pre-commit autoupdate

scripts/start_pyinfra.py

@ -0,0 +1,18 @@
import time
from pyinfra.config.loader import load_settings, parse_settings_path
from pyinfra.examples import start_standard_queue_consumer
from pyinfra.queue.callback import make_download_process_upload_callback
def processor_mock(_data: dict, _message: dict) -> dict:
time.sleep(5)
return {"result1": "result1"}
if __name__ == "__main__":
arguments = parse_settings_path()
settings = load_settings(arguments)
callback = make_download_process_upload_callback(processor_mock, settings)
start_standard_queue_consumer(callback, settings)


@ -1,13 +0,0 @@
#!/usr/bin/env python
from distutils.core import setup
setup(
name="pyinfra",
version="0.0.1",
description="",
author="",
author_email="",
url="",
packages=["pyinfra"],
)


@ -1,4 +0,0 @@
sonar.exclusions=bamboo-specs/**, build_venv/**
sonar.c.file.suffixes=-
sonar.cpp.file.suffixes=-
sonar.objc.file.suffixes=-


@ -1,81 +0,0 @@
import logging
from multiprocessing import Process
import requests
from retry import retry
from pyinfra.config import CONFIG
from pyinfra.exceptions import AnalysisFailure, ConsumerError
from pyinfra.flask import run_probing_webserver, set_up_probing_webserver
from pyinfra.queue.consumer import Consumer
from pyinfra.queue.queue_manager.pika_queue_manager import PikaQueueManager
from pyinfra.storage.storages import get_storage
from pyinfra.utils.banner import show_banner
from pyinfra.visitor import QueueVisitor, StorageStrategy
def make_callback(analysis_endpoint):
def callback(message):
def perform_operation(operation):
endpoint = f"{analysis_endpoint}/{operation}"
try:
logging.debug(f"Requesting analysis from {endpoint}...")
analysis_response = requests.post(endpoint, data=message["data"])
analysis_response.raise_for_status()
analysis_response = analysis_response.json()
logging.debug(f"Received response.")
return analysis_response
except Exception as err:
logging.warning(f"Exception caught when calling analysis endpoint {endpoint}.")
raise AnalysisFailure() from err
operations = message.get("operations", ["/"])
results = map(perform_operation, operations)
result = dict(zip(operations, results))
if list(result.keys()) == ["/"]:
result = list(result.values())[0]
return result
return callback
def main():
show_banner()
webserver = Process(target=run_probing_webserver, args=(set_up_probing_webserver(),))
logging.info("Starting webserver...")
webserver.start()
callback = make_callback(CONFIG.rabbitmq.callback.analysis_endpoint)
storage = get_storage(CONFIG.storage.backend)
response_strategy = StorageStrategy(storage)
visitor = QueueVisitor(storage, callback, response_strategy)
@retry(ConsumerError, tries=3, delay=5, jitter=(1, 3))
def consume():
try: # RED-4049 queue manager needs to be in try scope to eventually throw Exception after connection loss.
queue_manager = PikaQueueManager(CONFIG.rabbitmq.queues.input, CONFIG.rabbitmq.queues.output)
consumer = Consumer(visitor, queue_manager)
consumer.basic_consume_and_publish()
except Exception as err:
raise ConsumerError from err
try:
consume()
except KeyboardInterrupt:
pass
except ConsumerError:
webserver.terminate()
raise
webserver.join()
if __name__ == "__main__":
logging_level = CONFIG.service.logging_level
logging.basicConfig(level=logging_level)
logging.getLogger("pika").setLevel(logging.ERROR)
logging.getLogger("flask").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
main()


@ -1,5 +0,0 @@
from pyinfra.config import Config
from pyinfra.locations import TEST_CONFIG_FILE
CONFIG = Config(TEST_CONFIG_FILE)


@ -1,25 +0,0 @@
storage:
minio:
endpoint: "http://127.0.0.1:9000"
access_key: root
secret_key: password
region: null
aws:
endpoint: https://s3.amazonaws.com
access_key: AKIA4QVP6D4LCDAGYGN2
secret_key: 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
region: $STORAGE_REGION|"eu-west-1"
azure:
connection_string: "DefaultEndpointsProtocol=https;AccountName=iqserdevelopment;AccountKey=4imAbV9PYXaztSOMpIyAClg88bAZCXuXMGJG0GA1eIBpdh2PlnFGoRBnKqLy2YZUSTmZ3wJfC7tzfHtuC6FEhQ==;EndpointSuffix=core.windows.net"
bucket: "pyinfra-test-bucket"
webserver:
host: $SERVER_HOST|"127.0.0.1" # webserver address
port: $SERVER_PORT|5000 # webserver port
mode: $SERVER_MODE|production # webserver mode: {development, production}
mock_analysis_endpoint: "http://127.0.0.1:5000"


@ -1,79 +0,0 @@
import json
from operator import itemgetter
import pytest
from flask import Flask, request, jsonify
import fpdf
def set_up_processing_server():
app = Flask(__name__)
@app.route("/ready", methods=["GET"])
def ready():
resp = jsonify("OK")
resp.status_code = 200
return resp
@app.route("/process", methods=["POST"])
def process():
payload = json.loads(request.json)
data = payload["data"].encode()
metadata = payload["metadata"]
response_payload = {"metadata_type": str(type(metadata)), "data_type": str(type(data))}
return jsonify(response_payload)
return app
@pytest.fixture
def server():
server = set_up_processing_server()
server.config.update({"TESTING": True})
return server
@pytest.fixture
def client(server):
return server.test_client()
def test_server_ready_check(client):
response = client.get("/ready")
assert response.status_code == 200
assert response.json == "OK"
@pytest.mark.parametrize("data_type", ["pdf", "bytestring"])
def test_sending_bytes_through_json(client, data):
payload = {"data": data.decode("latin1"), "metadata": {"A": 1, "B": [2, 3]}}
response = client.post("/process", json=json.dumps(payload))
response_payload = response.json
data_type, metadata_type = itemgetter("data_type", "metadata_type")(response_payload)
assert data_type == "<class 'bytes'>"
assert metadata_type == "<class 'dict'>"
@pytest.fixture
def pdf():
pdf = fpdf.FPDF(unit="pt")
pdf.add_page()
return pdf_stream(pdf)
def pdf_stream(pdf: fpdf.fpdf.FPDF):
return pdf.output(dest="S").encode("latin1")
@pytest.fixture
def data(data_type, pdf):
if data_type == "pdf":
return pdf
elif data_type == "bytestring":
return "content".encode("latin1")


@ -1,46 +0,0 @@
from pyinfra.queue.queue_manager.queue_manager import QueueManager, QueueHandle
from test.queue.queue_mock import QueueMock
def monkey_patch_queue_handle(queue) -> QueueHandle:
queue_handle = QueueHandle()
queue_handle.empty = lambda: not queue
queue_handle.to_list = lambda: list(queue)
return queue_handle
class QueueManagerMock(QueueManager):
def __init__(self, input_queue, output_queue):
super().__init__(QueueMock(), QueueMock())
def publish_request(self, request):
self._input_queue.append(request)
def publish_response(self, message, callback):
self._output_queue.append(callback(message))
def pull_request(self):
return self._input_queue.popleft()
def consume(self, **kwargs):
while self._input_queue:
yield self.pull_request()
def consume_and_publish(self, callback):
for message in self.consume():
self.publish_response(message, callback)
def basic_consume_and_publish(self, callback):
raise NotImplementedError
def clear(self):
self._input_queue.clear()
self._output_queue.clear()
@property
def input_queue(self) -> QueueHandle:
return monkey_patch_queue_handle(self._input_queue)
@property
def output_queue(self) -> QueueHandle:
return monkey_patch_queue_handle(self._output_queue)


@ -1,5 +0,0 @@
from collections import deque
class QueueMock(deque):
pass


@ -1,30 +0,0 @@
from pyinfra.storage.adapters.adapter import StorageAdapter
from test.storage.client_mock import StorageClientMock
class StorageAdapterMock(StorageAdapter):
def __init__(self, client: StorageClientMock):
assert isinstance(client, StorageClientMock)
super().__init__(client=client)
self.__client = self._StorageAdapter__client
def make_bucket(self, bucket_name):
self.__client.make_bucket(bucket_name)
def has_bucket(self, bucket_name):
return self.__client.has_bucket(bucket_name)
def put_object(self, bucket_name, object_name, data):
return self.__client.put_object(bucket_name, object_name, data)
def get_object(self, bucket_name, object_name):
return self.__client.get_object(bucket_name, object_name)
def get_all_objects(self, bucket_name):
return self.__client.get_all_objects(bucket_name)
def clear_bucket(self, bucket_name):
return self.__client.clear_bucket(bucket_name)
def get_all_object_names(self, bucket_name):
return self.__client.get_all_object_names(bucket_name)


@ -1,27 +0,0 @@
from itertools import repeat
class StorageClientMock:
def __init__(self):
self.__data = {}
def make_bucket(self, bucket_name):
self.__data[bucket_name] = {}
def has_bucket(self, bucket_name):
return bucket_name in self.__data
def put_object(self, bucket_name, object_name, data):
self.__data[bucket_name][object_name] = data
def get_object(self, bucket_name, object_name):
return self.__data[bucket_name][object_name]
def get_all_objects(self, bucket_name):
return self.__data[bucket_name].values()
def clear_bucket(self, bucket_name):
self.__data[bucket_name] = {}
def get_all_object_names(self, bucket_name):
return zip(repeat(bucket_name), self.__data[bucket_name])


@ -1,10 +0,0 @@
import pytest
from pyinfra.storage.adapters.azure import AzureStorageAdapter
from test.storage.client_mock import StorageClientMock
@pytest.fixture
def adapter():
adapter = AzureStorageAdapter(StorageClientMock())
return adapter


@ -1,45 +0,0 @@
import os
import tempfile
import pytest
import yaml
from pyinfra.config import Config, parse_disjunction_string
@pytest.fixture
def config_file_content():
return {"A": [{"B": [1, 2]}, {"C": 3}, 4], "D": {"E": {"F": True}}}
@pytest.fixture
def config(config_file_content):
with tempfile.NamedTemporaryFile(suffix=".yaml", mode="w") as f:
yaml.dump(config_file_content, f, default_flow_style=False)
yield Config(f.name)
def test_dot_access_key_exists(config):
assert config.A == [{"B": [1, 2]}, {"C": 3}, 4]
assert config.D.E["F"]
def test_access_key_exists(config):
assert config["A"] == [{"B": [1, 2]}, {"C": 3}, 4]
assert config["A"][0] == {"B": [1, 2]}
assert config["A"][0]["B"] == [1, 2]
assert config["A"][0]["B"][0] == 1
def test_dot_access_key_does_not_exists(config):
assert config.B is None
def test_access_key_does_not_exists(config):
assert config["B"] is None
def test_parse_disjunction_string():
assert parse_disjunction_string("A|Bb|c") == "c"
os.environ["Bb"] = "d"
assert parse_disjunction_string("A|Bb|c") == "d"


@ -1,160 +0,0 @@
import json
import logging
import time
from unittest.mock import Mock
import pika
import pytest
import testcontainers.compose
from pyinfra.exceptions import UnknownClient
from pyinfra.locations import TEST_DIR, COMPOSE_PATH
from pyinfra.queue.queue_manager.pika_queue_manager import PikaQueueManager, get_connection_params
from pyinfra.queue.queue_manager.queue_manager import QueueManager
from pyinfra.storage.adapters.azure import AzureStorageAdapter
from pyinfra.storage.adapters.s3 import S3StorageAdapter
from pyinfra.storage.clients.azure import get_azure_client
from pyinfra.storage.clients.s3 import get_s3_client
from pyinfra.storage.storage import Storage
from test.config import CONFIG
from test.queue.queue_manager_mock import QueueManagerMock
from test.storage.adapter_mock import StorageAdapterMock
from test.storage.client_mock import StorageClientMock
from pyinfra.visitor import StorageStrategy, ForwardingStrategy, QueueVisitor
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@pytest.fixture(scope="session")
def bucket_name():
return "pyinfra-test-bucket"
@pytest.fixture
def storage_data():
with open(f"{TEST_DIR}/test_data/test_data.TEXT.json", "r") as f:
data = json.load(f)
return data
@pytest.fixture
def mock_response(storage_data):
response = Mock(status_code=200)
response.json.return_value = storage_data
return response
@pytest.fixture
def mock_payload():
return json.dumps({"dossierId": "test", "fileId": "test"})
@pytest.fixture
def mock_make_load_data():
def load_data(payload):
return storage_data
return load_data
@pytest.fixture(params=["minio", "aws"], scope="session")
def storage(client_name, bucket_name, request):
logger.debug("Setup for storage")
storage = Storage(get_adapter(client_name, request.param))
storage.make_bucket(bucket_name)
storage.clear_bucket(bucket_name)
yield storage
logger.debug("Teardown for storage")
storage.clear_bucket(bucket_name)
@pytest.fixture(scope="session", autouse=True)
def docker_compose(sleep_seconds=30):
logger.info(f"Starting docker containers with {COMPOSE_PATH}/docker-compose.yml...")
compose = testcontainers.compose.DockerCompose(COMPOSE_PATH, compose_file_name="docker-compose.yml")
compose.start()
logger.info(f"Sleeping for {sleep_seconds} seconds to wait for containers to finish startup... ")
time.sleep(sleep_seconds)
yield compose
compose.stop()
def get_pika_connection_params():
params = get_connection_params()
return params
def get_s3_params(s3_backend):
params = CONFIG.storage[s3_backend]
return params
def get_adapter(client_name, s3_backend):
if client_name == "mock":
return StorageAdapterMock(StorageClientMock())
if client_name == "azure":
return AzureStorageAdapter(get_azure_client(CONFIG.storage.azure.connection_string))
if client_name == "s3":
return S3StorageAdapter(get_s3_client(get_s3_params(s3_backend)))
else:
raise UnknownClient(client_name)
def get_queue_manager(queue_manager_name) -> QueueManager:
if queue_manager_name == "mock":
return QueueManagerMock("input", "output")
if queue_manager_name == "pika":
return PikaQueueManager("input", "output", connection_params=get_pika_connection_params())
@pytest.fixture(scope="session")
def queue_manager(queue_manager_name):
def close_connections():
if queue_manager_name == "pika":
try:
queue_manager.connection.close()
except (pika.exceptions.StreamLostError, pika.exceptions.ConnectionWrongStateError, ConnectionResetError):
logger.debug("Connection was already closed when attempting to close explicitly.")
def close_channel():
if queue_manager_name == "pika":
try:
queue_manager.channel.close()
except pika.exceptions.ChannelWrongStateError:
logger.debug("Channel was already closed when attempting to close explicitly.")
queue_manager = get_queue_manager(queue_manager_name)
yield queue_manager
close_connections()
close_channel()
@pytest.fixture(scope="session")
def callback():
def inner(request):
return request["data"].decode() * 2
return inner
@pytest.fixture
def analysis_callback(callback):
def inner(request):
return callback(request)
return inner
@pytest.fixture
def response_strategy(response_strategy_name, storage):
if response_strategy_name == "storage":
return StorageStrategy(storage)
if response_strategy_name == "forwarding":
return ForwardingStrategy()
@pytest.fixture()
def visitor(storage, analysis_callback, response_strategy):
return QueueVisitor(storage, analysis_callback, response_strategy)


@ -1,126 +0,0 @@
import gzip
import json
import logging
from operator import itemgetter
import pytest
from pyinfra.exceptions import ProcessingFailure
from pyinfra.queue.consumer import Consumer
from pyinfra.visitor import get_object_descriptor, ForwardingStrategy
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@pytest.fixture(scope="session")
def consumer(queue_manager, callback):
return Consumer(callback, queue_manager)
@pytest.fixture(scope="session")
def access_callback():
return itemgetter("fileId")
@pytest.fixture()
def items():
def inner():
for i in range(3):
body = {
"dossierId": "folder",
"fileId": f"file{i}",
"targetFileExtension": "in.gz",
"responseFileExtension": "out.gz",
}
yield f"{i}".encode(), body
return list(inner())
class TestConsumer:
@pytest.mark.parametrize("queue_manager_name", ["mock", "pika"], scope="session")
def test_consuming_empty_input_queue_does_not_put_anything_on_output_queue(self, consumer, queue_manager):
queue_manager.clear()
consumer.consume()
assert queue_manager.output_queue.empty()
@pytest.mark.parametrize("queue_manager_name", ["mock", "pika"], scope="session")
def test_consuming_nonempty_input_queue_puts_messages_on_output_queue_in_fifo_order(
self, consumer, queue_manager, callback
):
def produce_items():
return map(str, range(3))
def mock_visitor(callback):
def inner(data):
return callback({"data": data.encode()})
return inner
callback = mock_visitor(callback)
queue_manager.clear()
for item in produce_items():
queue_manager.publish_request(item)
requests = consumer.consume()
for _, r in zip(produce_items(), requests):
queue_manager.publish_response(r, callback)
assert queue_manager.output_queue.to_list() == ["00", "11", "22"]
@pytest.mark.parametrize("queue_manager_name", ["mock", "pika"], scope="session")
@pytest.mark.parametrize("client_name", ["mock", "s3", "azure"], scope="session")
@pytest.mark.parametrize("response_strategy_name", ["forwarding", "storage"], scope="session")
def test_consuming_nonempty_input_queue_with_visitor_puts_messages_on_output_queue_in_fifo_order(
self, consumer, queue_manager, visitor, bucket_name, storage, items
):
visitor.response_strategy = ForwardingStrategy()
queue_manager.clear()
storage.clear_bucket(bucket_name)
for data, message in items:
storage.put_object(**get_object_descriptor(message), data=gzip.compress(data))
queue_manager.publish_request(message)
requests = consumer.consume(inactivity_timeout=5)
for itm, req in zip(items, requests):
logger.debug(f"Processing item {itm}")
queue_manager.publish_response(req, visitor)
assert list(map(itemgetter("data"), queue_manager.output_queue.to_list())) == ["00", "11", "22"]
@pytest.mark.parametrize("queue_manager_name", ["pika"], scope="session")
def test_message_is_republished_when_callback_raises_processing_failure_exception(
self, consumer, queue_manager, bucket_name, items
):
class DebugError(Exception):
pass
def callback(_):
raise ProcessingFailure()
def reject_patch(*args, **kwargs):
raise DebugError()
queue_manager.reject = reject_patch
queue_manager.clear()
for data, message in items:
queue_manager.publish_request(message)
requests = consumer.consume()
logger = logging.getLogger("pyinfra.queue.queue_manager.pika_queue_manager")
logger.addFilter(lambda record: False)
with pytest.raises(DebugError):
while True:
queue_manager.publish_response(next(requests), callback)


@ -1,38 +0,0 @@
import gzip
import json
import pytest
from pyinfra.visitor import get_object_descriptor, get_response_object_descriptor
@pytest.fixture()
def body():
return {"dossierId": "folder", "fileId": "file", "targetFileExtension": "in.gz", "responseFileExtension": "out.gz"}
@pytest.mark.parametrize("client_name", ["mock", "azure", "s3"], scope="session")
class TestVisitor:
@pytest.mark.parametrize("response_strategy_name", ["forwarding", "storage"], scope="session")
def test_given_a_input_queue_message_callback_pulls_the_data_from_storage(
self, visitor, body, storage, bucket_name
):
storage.clear_bucket(bucket_name)
storage.put_object(**get_object_descriptor(body), data=gzip.compress(b"content"))
data_received = visitor.load_data(body)
assert b"content" == data_received
@pytest.mark.parametrize("response_strategy_name", ["forwarding", "storage"], scope="session")
def test_visitor_pulls_and_processes_data(self, visitor, body, storage, bucket_name):
storage.clear_bucket(bucket_name)
storage.put_object(**get_object_descriptor(body), data=gzip.compress("2".encode()))
response_body = visitor.load_and_process(body)
assert response_body["data"] == "22"
@pytest.mark.parametrize("response_strategy_name", ["storage"], scope="session")
def test_visitor_puts_response_on_storage(self, visitor, body, storage, bucket_name):
storage.clear_bucket(bucket_name)
storage.put_object(**get_object_descriptor(body), data=gzip.compress("2".encode()))
response_body = visitor(body)
assert "data" not in response_body
assert json.loads(gzip.decompress(storage.get_object(**get_response_object_descriptor(body))))["data"] == "22"


@ -1,52 +0,0 @@
import logging
import pytest
from pyinfra.exceptions import DataLoadingFailure
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@pytest.mark.parametrize("client_name", ["mock", "azure", "s3"], scope="session")
class TestStorage:
def test_clearing_bucket_yields_empty_bucket(self, storage, bucket_name):
storage.clear_bucket(bucket_name)
data_received = storage.get_all_objects(bucket_name)
assert not {*data_received}
def test_getting_object_put_in_bucket_is_object(self, storage, bucket_name):
storage.clear_bucket(bucket_name)
storage.put_object(bucket_name, "file", b"content")
data_received = storage.get_object(bucket_name, "file")
assert b"content" == data_received
def test_getting_nested_object_put_in_bucket_is_nested_object(self, storage, bucket_name):
storage.clear_bucket(bucket_name)
storage.put_object(bucket_name, "folder/file", b"content")
data_received = storage.get_object(bucket_name, "folder/file")
assert b"content" == data_received
def test_getting_objects_put_in_bucket_are_objects(self, storage, bucket_name):
storage.clear_bucket(bucket_name)
storage.put_object(bucket_name, "file1", b"content 1")
storage.put_object(bucket_name, "folder/file2", b"content 2")
data_received = storage.get_all_objects(bucket_name)
assert {b"content 1", b"content 2"} == {*data_received}
def test_make_bucket_produces_bucket(self, storage, bucket_name):
storage.clear_bucket(bucket_name)
storage.make_bucket(bucket_name)
assert storage.has_bucket(bucket_name)
def test_listing_bucket_files_yields_all_files_in_bucket(self, storage, bucket_name):
storage.clear_bucket(bucket_name)
storage.put_object(bucket_name, "file1", b"content 1")
storage.put_object(bucket_name, "file2", b"content 2")
full_names_received = storage.get_all_object_names(bucket_name)
assert {(bucket_name, "file1"), (bucket_name, "file2")} == {*full_names_received}
def test_data_loading_failure_raised_if_object_not_present(self, storage, bucket_name):
storage.clear_bucket(bucket_name)
with pytest.raises(DataLoadingFailure):
storage.get_object(bucket_name, "folder/file")

tests/conftest.py

@ -0,0 +1,48 @@
import json
import pytest
from pyinfra.config.loader import load_settings, local_pyinfra_root_path
from pyinfra.queue.manager import QueueManager
from pyinfra.storage.connection import get_storage
@pytest.fixture(scope="session")
def settings():
return load_settings(local_pyinfra_root_path / "config/")
@pytest.fixture(scope="class")
def storage(storage_backend, settings):
settings.storage.backend = storage_backend
storage = get_storage(settings)
storage.make_bucket()
yield storage
storage.clear_bucket()
@pytest.fixture(scope="session")
def queue_manager(settings):
settings.rabbitmq_heartbeat = 10
settings.connection_sleep = 5
settings.rabbitmq.max_retries = 3
settings.rabbitmq.max_delay = 10
queue_manager = QueueManager(settings)
yield queue_manager
@pytest.fixture
def input_message():
return json.dumps(
{
"targetFilePath": "test/target.json.gz",
"responseFilePath": "test/response.json.gz",
}
)
@pytest.fixture
def stop_message():
return "STOP"

tests/data.dvc

@ -0,0 +1,6 @@
outs:
- md5: 75cc98b7c8fcf782a7d4941594e6bc12.dir
size: 134913
nfiles: 9
hash: md5
path: data

tests/docker-compose.yml

@ -0,0 +1,41 @@
version: '3.8'
services:
minio:
image: minio/minio:latest
container_name: minio
ports:
- "9000:9000"
environment:
- MINIO_ROOT_PASSWORD=password
- MINIO_ROOT_USER=root
volumes:
- /tmp/data/minio_store:/data
command: server /data
network_mode: "bridge"
extra_hosts:
- "host.docker.internal:host-gateway"
rabbitmq:
image: docker.io/bitnami/rabbitmq:latest
container_name: rabbitmq
ports:
# - '4369:4369'
# - '5551:5551'
# - '5552:5552'
- '5672:5672'
- '15672:15672'
# - '25672:25672'
environment:
- RABBITMQ_SECURE_PASSWORD=yes
- RABBITMQ_VM_MEMORY_HIGH_WATERMARK=100%
- RABBITMQ_DISK_FREE_ABSOLUTE_LIMIT=20Gi
- RABBITMQ_MANAGEMENT_ALLOW_WEB_ACCESS=true
network_mode: "bridge"
volumes:
- /tmp/bitnami/rabbitmq/.rabbitmq/:/data/bitnami
healthcheck:
test: [ "CMD", "curl", "-f", "http://localhost:15672" ]
interval: 30s
timeout: 10s
retries: 5
extra_hosts:
- "host.docker.internal:host-gateway"


@ -0,0 +1,41 @@
from time import sleep

import pytest

from pyinfra.utils.opentelemetry import get_exporter, instrument_pika, setup_trace


@pytest.fixture(scope="session")
def exporter(settings):
    settings.tracing.opentelemetry.exporter = "json"
    return get_exporter(settings)


@pytest.fixture(autouse=True)
def setup_test_trace(settings, exporter, tracing_type):
    settings.tracing.type = tracing_type
    setup_trace(settings, exporter=exporter)


class TestOpenTelemetry:
    @pytest.mark.xfail(
        reason="Azure Monitor requires a connection string. Therefore the test is allowed to fail in this case."
    )
    @pytest.mark.parametrize("tracing_type", ["opentelemetry", "azure_monitor"])
    def test_queue_messages_are_traced(self, queue_manager, input_message, stop_message, settings, exporter):
        instrument_pika()
        queue_manager.purge_queues()
        queue_manager.publish_message_to_input_queue(input_message)
        queue_manager.publish_message_to_input_queue(stop_message)

        def callback(_):
            sleep(2)
            return {"flat": "earth"}

        queue_manager.start_consuming(callback)

        for exported_trace in exporter.traces:
            assert (
                exported_trace["resource"]["attributes"]["service.name"] == settings.tracing.opentelemetry.service_name
            )
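
A hedged sketch of how a consumer service might wire up tracing with the same helpers this test imports. Only the calls shown in the test (get_exporter, setup_trace, instrument_pika, QueueManager.start_consuming) are taken from the source; the surrounding function is illustrative.

# Hypothetical tracing setup for a queue consumer; not the library's canonical entry point.
from pyinfra.queue.manager import QueueManager
from pyinfra.utils.opentelemetry import get_exporter, instrument_pika, setup_trace


def run_with_tracing(settings, callback):
    exporter = get_exporter(settings)          # exporter type comes from settings.tracing.opentelemetry.exporter
    setup_trace(settings, exporter=exporter)   # honours settings.tracing.type
    instrument_pika()                          # trace messages published/consumed via pika
    QueueManager(settings).start_consuming(callback)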

@@ -0,0 +1,55 @@
import re
from time import sleep

import pytest
import requests
from fastapi import FastAPI

from pyinfra.webserver.prometheus import (
    add_prometheus_endpoint,
    make_prometheus_processing_time_decorator_from_settings,
)
from pyinfra.webserver.utils import create_webserver_thread_from_settings


@pytest.fixture(scope="class")
def app_with_prometheus_endpoint(settings):
    app = FastAPI()
    app = add_prometheus_endpoint(app)
    thread = create_webserver_thread_from_settings(app, settings)
    thread.daemon = True
    thread.start()
    sleep(1)
    yield
    thread.join(timeout=1)


@pytest.fixture
def monitored_function(settings):
    @make_prometheus_processing_time_decorator_from_settings(settings)
    def process(*args, **kwargs):
        sleep(0.5)

    return process


class TestPrometheusMonitor:
    def test_prometheus_endpoint_is_available(self, app_with_prometheus_endpoint, settings):
        resp = requests.get(f"http://{settings.webserver.host}:{settings.webserver.port}/prometheus")
        assert resp.status_code == 200

    def test_processing_with_a_monitored_fn_increases_parameter_counter(
        self, app_with_prometheus_endpoint, monitored_function, settings
    ):
        pattern = re.compile(rf".*{settings.metrics.prometheus.prefix}_processing_time_count (\d\.\d).*")

        resp = requests.get(f"http://{settings.webserver.host}:{settings.webserver.port}/prometheus")
        assert pattern.search(resp.text).group(1) == "0.0"

        monitored_function()
        resp = requests.get(f"http://{settings.webserver.host}:{settings.webserver.port}/prometheus")
        assert pattern.search(resp.text).group(1) == "1.0"

        monitored_function()
        resp = requests.get(f"http://{settings.webserver.host}:{settings.webserver.port}/prometheus")
        assert pattern.search(resp.text).group(1) == "2.0"

@@ -0,0 +1,90 @@
import json
from sys import stdout
from time import sleep

import pika
from kn_utils.logging import logger

logger.remove()
logger.add(sink=stdout, level="DEBUG")


def make_callback(process_time):
    def callback(x):
        sleep(process_time)
        return {"status": "success"}

    return callback


def file_not_found_callback(x):
    raise FileNotFoundError("File not found")


class TestQueueManager:
    def test_not_available_file_leads_to_message_rejection_without_crashing(
        self, queue_manager, input_message, stop_message
    ):
        queue_manager.purge_queues()
        queue_manager.publish_message_to_input_queue(input_message)
        queue_manager.publish_message_to_input_queue(stop_message)
        queue_manager.start_consuming(file_not_found_callback)

    def test_processing_of_several_messages(self, queue_manager, input_message, stop_message):
        queue_manager.purge_queues()
        for _ in range(2):
            queue_manager.publish_message_to_input_queue(input_message)
        queue_manager.publish_message_to_input_queue(stop_message)

        callback = make_callback(1)
        queue_manager.start_consuming(callback)

        for _ in range(2):
            response = queue_manager.get_message_from_output_queue()
            assert response is not None
            assert json.loads(response[2].decode()) == {"status": "success"}

    def test_all_headers_beginning_with_x_are_forwarded(self, queue_manager, input_message, stop_message):
        queue_manager.purge_queues()
        properties = pika.BasicProperties(
            headers={
                "X-TENANT-ID": "redaction",
                "X-OTHER-HEADER": "other-header-value",
                "x-tenant_id": "tenant-id-value",
                "x_should_not_be_forwarded": "should-not-be-forwarded-value",
            }
        )
        queue_manager.publish_message_to_input_queue(input_message, properties=properties)
        queue_manager.publish_message_to_input_queue(stop_message)

        callback = make_callback(0.2)
        queue_manager.start_consuming(callback)

        response = queue_manager.get_message_from_output_queue()
        assert json.loads(response[2].decode()) == {"status": "success"}
        assert response[1].headers["X-TENANT-ID"] == "redaction"
        assert response[1].headers["X-OTHER-HEADER"] == "other-header-value"
        assert response[1].headers["x-tenant_id"] == "tenant-id-value"
        assert "x_should_not_be_forwarded" not in response[1].headers

    def test_message_processing_does_not_block_heartbeat(self, queue_manager, input_message, stop_message):
        queue_manager.purge_queues()
        queue_manager.publish_message_to_input_queue(input_message)
        queue_manager.publish_message_to_input_queue(stop_message)

        callback = make_callback(15)
        queue_manager.start_consuming(callback)

        response = queue_manager.get_message_from_output_queue()
        assert json.loads(response[2].decode()) == {"status": "success"}

@@ -0,0 +1,166 @@
import gzip
import json
from time import sleep

import pytest
from fastapi import FastAPI

from pyinfra.storage.connection import get_storage_for_tenant
from pyinfra.storage.utils import (
    download_data_bytes_as_specified_in_message,
    upload_data_as_specified_in_message,
)
from pyinfra.utils.cipher import encrypt
from pyinfra.webserver.utils import create_webserver_thread


@pytest.mark.parametrize("storage_backend", ["azure", "s3"], scope="class")
class TestStorage:
    def test_clearing_bucket_yields_empty_bucket(self, storage):
        storage.clear_bucket()
        data_received = storage.get_all_objects()
        assert not {*data_received}

    def test_getting_object_put_in_bucket_is_object(self, storage):
        storage.clear_bucket()
        storage.put_object("file", b"content")
        data_received = storage.get_object("file")
        assert b"content" == data_received

    def test_object_put_in_bucket_exists_on_storage(self, storage):
        storage.clear_bucket()
        storage.put_object("file", b"content")
        assert storage.exists("file")

    def test_getting_nested_object_put_in_bucket_is_nested_object(self, storage):
        storage.clear_bucket()
        storage.put_object("folder/file", b"content")
        data_received = storage.get_object("folder/file")
        assert b"content" == data_received

    def test_getting_objects_put_in_bucket_are_objects(self, storage):
        storage.clear_bucket()
        storage.put_object("file1", b"content 1")
        storage.put_object("folder/file2", b"content 2")
        data_received = storage.get_all_objects()
        assert {b"content 1", b"content 2"} == {*data_received}

    def test_make_bucket_produces_bucket(self, storage):
        storage.clear_bucket()
        storage.make_bucket()
        assert storage.has_bucket()

    def test_listing_bucket_files_yields_all_files_in_bucket(self, storage):
        storage.clear_bucket()
        storage.put_object("file1", b"content 1")
        storage.put_object("file2", b"content 2")
        full_names_received = storage.get_all_object_names()
        assert {(storage.bucket, "file1"), (storage.bucket, "file2")} == {*full_names_received}

    def test_data_loading_failure_raised_if_object_not_present(self, storage):
        storage.clear_bucket()
        with pytest.raises(Exception):
            storage.get_object("folder/file")


@pytest.fixture(scope="class")
def tenant_server_mock(settings, tenant_server_host, tenant_server_port):
    app = FastAPI()

    @app.get("/azure_tenant")
    def get_azure_storage_info():
        return {
            "azureStorageConnection": {
                "connectionString": encrypt(
                    settings.storage.tenant_server.public_key, settings.storage.azure.connection_string
                ),
                "containerName": settings.storage.azure.container,
            }
        }

    @app.get("/s3_tenant")
    def get_s3_storage_info():
        return {
            "s3StorageConnection": {
                "endpoint": settings.storage.s3.endpoint,
                "key": settings.storage.s3.key,
                "secret": encrypt(settings.storage.tenant_server.public_key, settings.storage.s3.secret),
                "region": settings.storage.s3.region,
                "bucketName": settings.storage.s3.bucket,
            }
        }

    thread = create_webserver_thread(app, tenant_server_port, tenant_server_host)
    thread.daemon = True
    thread.start()
    sleep(1)
    yield
    thread.join(timeout=1)


@pytest.mark.parametrize("tenant_id", ["azure_tenant", "s3_tenant"], scope="class")
@pytest.mark.parametrize("tenant_server_host", ["localhost"], scope="class")
@pytest.mark.parametrize("tenant_server_port", [8000], scope="class")
class TestMultiTenantStorage:
    def test_storage_connection_from_tenant_id(
        self, tenant_id, tenant_server_mock, settings, tenant_server_host, tenant_server_port
    ):
        settings["storage"]["tenant_server"]["endpoint"] = f"http://{tenant_server_host}:{tenant_server_port}"
        storage = get_storage_for_tenant(
            tenant_id,
            settings["storage"]["tenant_server"]["endpoint"],
            settings["storage"]["tenant_server"]["public_key"],
        )
        storage.put_object("file", b"content")
        data_received = storage.get_object("file")
        assert b"content" == data_received


@pytest.fixture
def payload(payload_type):
    if payload_type == "target_response_file_path":
        return {
            "targetFilePath": "test/file.target.json.gz",
            "responseFilePath": "test/file.response.json.gz",
        }
    elif payload_type == "dossier_id_file_id":
        return {
            "dossierId": "test",
            "fileId": "file",
            "targetFileExtension": "target.json.gz",
            "responseFileExtension": "response.json.gz",
        }
    elif payload_type == "target_file_dict":
        return {
            "targetFilePath": {"file_1": "test/file.target.json.gz", "file_2": "test/file.target.json.gz"},
            "responseFilePath": "test/file.response.json.gz",
        }


@pytest.mark.parametrize(
    "payload_type",
    [
        "target_response_file_path",
        "dossier_id_file_id",
        "target_file_dict",
    ],
    scope="class",
)
@pytest.mark.parametrize("storage_backend", ["azure", "s3"], scope="class")
class TestDownloadAndUploadFromMessage:
    def test_download_and_upload_from_message(self, storage, payload, payload_type):
        storage.clear_bucket()
        result = {"process_result": "success"}
        storage_data = {**payload, "data": result}
        packed_data = gzip.compress(json.dumps(storage_data).encode())
        storage.put_object("test/file.target.json.gz", packed_data)

        _ = download_data_bytes_as_specified_in_message(storage, payload)
        upload_data_as_specified_in_message(storage, payload, result)

        data = json.loads(gzip.decompress(storage.get_object("test/file.response.json.gz")).decode())
        assert data == storage_data
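
A hedged sketch of how a worker callback might combine the helpers exercised above: download the inputs named in the message, process them, and upload the result to the response path. The processing hook and the return shape of the download helper are assumptions, not taken from the library.

# Hypothetical worker callback built on the message-based storage helpers.
from pyinfra.storage.utils import (
    download_data_bytes_as_specified_in_message,
    upload_data_as_specified_in_message,
)


def make_worker_callback(storage, process):
    def callback(message: dict) -> dict:
        # Per the breaking change in this merge request, the download helper is
        # assumed to hand over the referenced files as bytes.
        files = download_data_bytes_as_specified_in_message(storage, message)
        result = process(files)
        upload_data_as_specified_in_message(storage, message, result)
        return {"status": "success"}

    return callback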

@@ -0,0 +1,29 @@
import pytest

from pyinfra.utils.cipher import decrypt, encrypt


@pytest.fixture
def ciphertext():
    return "AAAADBRzag4/aAE2+rSekyI5phVZ1e0wwSaRkGQTLftPyVvq8vLYZzwxW48Wozc3/w=="


@pytest.fixture
def plaintext():
    return "connectzionString"


@pytest.fixture
def public_key():
    return "redaction"


class TestDecryption:
    def test_decrypt_ciphertext(self, public_key, ciphertext, plaintext):
        result = decrypt(public_key, ciphertext)
        assert result == plaintext

    def test_encrypt_plaintext(self, public_key, plaintext):
        ciphertext = encrypt(public_key, plaintext)
        result = decrypt(public_key, ciphertext)
        assert plaintext == result

Some files were not shown because too many files have changed in this diff.