Merge in RR/pyinfra from RED-6118-multi-tenancy to master
Squashed commit of the following:
commit 0a1301f9d7a12a1097e6bf9a1bb0a94025312d0a
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Thu Feb 16 09:12:54 2023 +0100
delete (for now) not needed exception module
commit 9b624f9c95c129bf186eaea8405a14d359ccb1ae
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Thu Feb 16 09:08:57 2023 +0100
implement message properties forwarding
- revert tenant validation logic since this functionality is not wanted
- implement request message properties forwarding to response message.
Thus, all message headers including x-tenant-id are present in the
response.
commit ddac812d32eeec09d9434c32595875eb354767f8
Merge: ed4b495 6828c65
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Wed Feb 15 17:00:54 2023 +0100
Merge branch 'master' of ssh://git.iqser.com:2222/rr/pyinfra into RED-6118-multi-tenancy
commit ed4b4956c6cb6d201fc29b0318078dfb8fa99006
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Wed Feb 15 10:00:28 2023 +0100
refactor
commit 970fd72aa73ace97d36f129031fb143209c5076b
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Tue Feb 14 17:22:54 2023 +0100
RED-6118 make pyinfra multi-tenant ready
- refactor message validation logic
- add tenant validation step:
- messages without header/tenant id are accepted for now, until
multi-tenancy is implemented in backend
- only valid tenant is 'redaction'
commit 0f04e799620e01b3346eeaf86f3e941830824202
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Tue Feb 14 15:42:28 2023 +0100
add dev scripts
- add scripts to ease pyinfra development by allowing to run pyinfra
locally with callback mock and publishing script.
55 lines
1.7 KiB
Python
55 lines
1.7 KiB
Python
import gzip
|
|
import json
|
|
import logging
|
|
from typing import Callable
|
|
|
|
from pyinfra.config import get_config
|
|
from pyinfra.queue.queue_manager import QueueManager
|
|
from pyinfra.storage import get_storage
|
|
|
|
# Configure process-wide logging: attach the default stderr handler to the
# root logger and raise its threshold to INFO.
logger = logging.getLogger()
logging.basicConfig()
logger.setLevel(logging.INFO)
|
|
|
|
def make_callback(processor: Callable, config=None):
    """Build a queue callback that runs *processor* over a stored object.

    The returned callback downloads the gzipped target object named in the
    request message, feeds the decompressed bytes to ``processor``, and
    uploads the gzipped JSON result under the response file name.

    Args:
        processor: Called with the decompressed object bytes; its return
            value is consumed into a ``list`` and stored as the ``"data"``
            field of the response.
        config: Optional configuration object exposing ``storage_bucket``
            and accepted by ``get_storage``. Defaults to ``get_config()``,
            resolved lazily at call time — the previous signature default
            ``config=get_config()`` ran once at import time and pinned a
            single config for the whole process lifetime.

    Returns:
        A one-argument callback for queue consumption. It returns
        ``{"dossierId": ..., "fileId": ...}`` on success, or ``None`` when
        the target object is absent from the bucket.
    """
    if config is None:
        config = get_config()
    bucket = config.storage_bucket
    storage = get_storage(config)

    def callback(request_message):
        # Keys read from the message here: dossierId, fileId,
        # targetFileExtension, responseFileExtension.
        dossier_id = request_message["dossierId"]
        file_id = request_message["fileId"]
        logger.info(f"Processing {dossier_id=} {file_id=} ...")

        target_file_name = f"{dossier_id}/{file_id}.{request_message['targetFileExtension']}"
        response_file_name = f"{dossier_id}/{file_id}.{request_message['responseFileExtension']}"

        # A missing input object is a soft failure: warn and skip this message.
        if not storage.exists(bucket, target_file_name):
            logger.warning(f"{target_file_name=} not present in {bucket=}, cancelling processing...")
            return None

        object_bytes = storage.get_object(bucket, target_file_name)
        object_bytes = gzip.decompress(object_bytes)
        result_body = list(processor(object_bytes))

        # Echo the whole request message into the result so its fields
        # (ids, extensions, any extra headers) survive the round trip, and
        # attach the processing output under "data".
        result = {**request_message, "data": result_body}
        storage_bytes = gzip.compress(json.dumps(result).encode("utf-8"))
        storage.put_object(bucket, response_file_name, storage_bytes)

        return {"dossierId": dossier_id, "fileId": file_id}

    return callback
|
|
|
|
|
|
def process(body):
    """Placeholder processor: ignores *body* and returns one stub record."""
    stub_record = {"response_key": "response_value"}
    return [stub_record]
|
|
|
|
|
|
def main():
    """Entry point: wire the stub processor into the queue and consume forever."""
    logger.info("Start consuming...")
    manager = QueueManager(get_config())
    consume_callback = make_callback(process)
    manager.start_consuming(consume_callback)
|
|
|
|
|
|
# Run the consumer only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|