Pull request #17: RED-4329 add prometheus
Merge in RR/image-prediction from RED-4329-add-prometheus to master
Squashed commit of the following:
commit 7fcf256c5277a3cfafcaf76c3116e3643ad01fa4
Merge: 8381621 c14d00c
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Tue Jun 21 15:41:14 2022 +0200
Merge branch 'master' into RED-4329-add-prometheus
commit 8381621ae08b1a91563c9c655020ec55bb58ecc5
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Tue Jun 21 15:24:50 2022 +0200
add prometheus endpoint
commit 26f07088b0a711b6f9db0974f5dfc8aa8ad4e1dc
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Tue Jun 21 15:14:34 2022 +0200
refactor
commit c563aa505018f8a14931a16a9061d361b5d4c383
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Tue Jun 21 15:10:19 2022 +0200
test bamboo build
commit 2b8446e703617c6897b6149846f2548ec292a9a1
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Tue Jun 21 14:40:44 2022 +0200
RED-4329 add prometheus endpoint with summary metric
This commit is contained in:
parent
c14d00cac8
commit
5d611d5fae
@ -3,6 +3,7 @@ import traceback
|
||||
from typing import Callable
|
||||
|
||||
from flask import Flask, request, jsonify
|
||||
from prometheus_client import generate_latest, CollectorRegistry, Summary
|
||||
|
||||
from image_prediction.utils import get_logger
|
||||
|
||||
@ -34,6 +35,10 @@ def wrap_in_process(func_to_wrap):
|
||||
|
||||
def make_prediction_server(predict_fn: Callable):
|
||||
app = Flask(__name__)
|
||||
registry = CollectorRegistry(auto_describe=True)
|
||||
metric = Summary(
|
||||
f"redactmanager_imageClassification_seconds", f"Time spent on image-service classification.", registry=registry
|
||||
)
|
||||
|
||||
@app.route("/ready", methods=["GET"])
|
||||
def ready():
|
||||
@ -54,6 +59,7 @@ def make_prediction_server(predict_fn: Callable):
|
||||
|
||||
@app.route("/predict", methods=["POST"])
|
||||
@app.route("/", methods=["POST"])
|
||||
@metric.time()
|
||||
def predict():
|
||||
|
||||
# Tensorflow does not free RAM. Workaround: Run prediction function (which instantiates a model) in sub-process.
|
||||
@ -71,4 +77,8 @@ def make_prediction_server(predict_fn: Callable):
|
||||
logger.error("Analysis failed.")
|
||||
return __failure()
|
||||
|
||||
@app.route("/prometheus", methods=["GET"])
|
||||
def prometheus():
|
||||
return generate_latest(registry=registry)
|
||||
|
||||
return app
|
||||
|
||||
@ -21,4 +21,5 @@ Pillow==9.1.0
|
||||
PDFNetPython3==9.1.0
|
||||
pdf2image==1.16.0
|
||||
frozendict==2.3.0
|
||||
protobuf==3.20.*
|
||||
protobuf==3.20.*
|
||||
prometheus-client==0.13.1
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user