Merge in RR/image-prediction from RED-6205-add-prometheus-monitoring to master
Squashed commit of the following:
commit 6932b5ee579a31d0317dc3f76acb8dd2845fdb4b
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Thu Mar 16 17:30:57 2023 +0100
update pyinfra
commit d6e55534623eae2edcddaa6dd333f93171d421dc
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Thu Mar 16 16:30:14 2023 +0100
set pyinfra subproject to current master commit
commit 030dc660e6060ae326c32fba8c2944a10866fbb6
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Thu Mar 16 16:25:19 2023 +0100
adapt serve script to advanced pyinfra API including monitoring of the processing time of images.
commit 0fa0c44c376c52653e517d257a35793797f7be31
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Thu Mar 16 15:19:57 2023 +0100
Update dockerfile to work with new pyinfra package setup utilizing pyproject.toml instead of setup.py and requirements.txt
commit aad53c4d313f908de93a13e69e2cb150db3be6cb
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Thu Mar 16 14:16:04 2023 +0100
remove no longer needed dependencies
38 lines
1.4 KiB
Python
38 lines
1.4 KiB
Python
from image_prediction import logger
|
|
from image_prediction.config import Config
|
|
from image_prediction.locations import CONFIG_FILE
|
|
from image_prediction.pipeline import load_pipeline
|
|
from image_prediction.utils.banner import load_banner
|
|
from image_prediction.utils.process_wrapping import wrap_in_process
|
|
from pyinfra import config
|
|
from pyinfra.payload_processing import make_payload_processor
|
|
from pyinfra.queue.queue_manager import QueueManager
|
|
|
|
PYINFRA_CONFIG = config.get_config()
|
|
IMAGE_CONFIG = Config(CONFIG_FILE)
|
|
|
|
logger.setLevel(PYINFRA_CONFIG.logging_level_root)
|
|
|
|
|
|
# A component of the processing pipeline (probably tensorflow) does not release allocated memory (see RED-4206).
|
|
# See: https://stackoverflow.com/questions/39758094/clearing-tensorflow-gpu-memory-after-model-execution
|
|
# Workaround: Manage Memory with the operating system, by wrapping the processing in a sub-process.
|
|
# FIXME: Find more fine-grained solution or if the problem occurs persistently for python services,
|
|
@wrap_in_process
|
|
def process_data(data: bytes) -> list:
|
|
pipeline = load_pipeline(verbose=IMAGE_CONFIG.service.verbose, batch_size=IMAGE_CONFIG.service.batch_size)
|
|
return list(pipeline(data))
|
|
|
|
|
|
def main():
|
|
logger.info(load_banner())
|
|
|
|
process_payload = make_payload_processor(process_data, config=PYINFRA_CONFIG)
|
|
|
|
queue_manager = QueueManager(PYINFRA_CONFIG)
|
|
queue_manager.start_consuming(process_payload)
|
|
|
|
|
|
# Only start the service when executed as a script, not on import.
if __name__ == "__main__":
    main()
|