Julius Unverfehrt 5d611d5fae Pull request #17: RED-4329 add prometheus
Merge in RR/image-prediction from RED-4329-add-prometheus to master

Squashed commit of the following:

commit 7fcf256c5277a3cfafcaf76c3116e3643ad01fa4
Merge: 8381621 c14d00c
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Jun 21 15:41:14 2022 +0200

    Merge branch 'master' into RED-4329-add-prometheus

commit 8381621ae08b1a91563c9c655020ec55bb58ecc5
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Jun 21 15:24:50 2022 +0200

    add prometheus endpoint

commit 26f07088b0a711b6f9db0974f5dfc8aa8ad4e1dc
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Jun 21 15:14:34 2022 +0200

    refactor

commit c563aa505018f8a14931a16a9061d361b5d4c383
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Jun 21 15:10:19 2022 +0200

    test bamboo build

commit 2b8446e703617c6897b6149846f2548ec292a9a1
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date:   Tue Jun 21 14:40:44 2022 +0200

    RED-4329 add prometheus endpoint with summary metric

import multiprocessing
import traceback
from typing import Callable

from flask import Flask, request, jsonify
from prometheus_client import generate_latest, CollectorRegistry, Summary

from image_prediction.utils import get_logger

logger = get_logger()


def run_in_process(func):
    # Execute func in a child process and block until it terminates, so any
    # memory the callable allocates is returned to the OS on exit.
    p = multiprocessing.Process(target=func)
    p.start()
    p.join()


def wrap_in_process(func_to_wrap):
    # Return a callable that runs func_to_wrap in a child process and hands
    # the result back through a Manager dict shared with the parent.
    def build_function_and_run_in_process(*args, **kwargs):
        manager = multiprocessing.Manager()
        return_dict = manager.dict()

        def func():
            try:
                result = func_to_wrap(*args, **kwargs)
                return_dict["result"] = result
            except Exception:
                logger.error(traceback.format_exc())

        run_in_process(func)
        return return_dict.get("result", None)

    return build_function_and_run_in_process
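

# Illustrative usage (a sketch, not part of the original module): wrapping a
# memory-hungry predictor so that everything it allocates is freed when the
# child process exits. `heavy_predict_unwrapped` and `image_bytes` are
# hypothetical names.
#
#     heavy_predict = wrap_in_process(heavy_predict_unwrapped)
#     result = heavy_predict(image_bytes)  # None if the child process raised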


def make_prediction_server(predict_fn: Callable):
    # Build the Flask app: prediction endpoints, liveness/readiness probes,
    # and a Prometheus exposition endpoint backed by a dedicated registry.
    app = Flask(__name__)
    registry = CollectorRegistry(auto_describe=True)
    metric = Summary(
        "redactmanager_imageClassification_seconds",
        "Time spent on image-service classification.",
        registry=registry,
    )
@app.route("/ready", methods=["GET"])
def ready():
resp = jsonify("OK")
resp.status_code = 200
return resp
@app.route("/health", methods=["GET"])
def healthy():
resp = jsonify("OK")
resp.status_code = 200
return resp
def __failure():
response = jsonify("Analysis failed")
response.status_code = 500
return response
@app.route("/predict", methods=["POST"])
@app.route("/", methods=["POST"])
@metric.time()
def predict():
# Tensorflow does not free RAM. Workaround: Run prediction function (which instantiates a model) in sub-process.
# See: https://stackoverflow.com/questions/39758094/clearing-tensorflow-gpu-memory-after-model-execution
predict_fn_wrapped = wrap_in_process(predict_fn)
logger.info("Analysing...")
predictions = predict_fn_wrapped(request.data)
if predictions is not None:
response = jsonify(predictions)
logger.info("Analysis completed.")
return response
else:
logger.error("Analysis failed.")
return __failure()
@app.route("/prometheus", methods=["GET"])
def prometheus():
return generate_latest(registry=registry)
return app
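

# Hedged usage sketch (an addition for illustration, not from the original
# module): a minimal local entry point. The echo predictor, host, and port
# below are assumptions; in practice the real predict_fn is injected by the
# service's deployment entry point.
if __name__ == "__main__":

    def _echo_predict(data: bytes):
        # Trivial stand-in predictor that just reports the payload size.
        return {"num_bytes": len(data)}

    make_prediction_server(_echo_predict).run(host="0.0.0.0", port=8080)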