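"""Queue-worker entry point: load settings, expose Prometheus metrics and
health/readiness endpoints via FastAPI in a background web server thread,
and consume queue messages using a mock JSON payload processor."""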
import logging
import time

from fastapi import FastAPI

from pyinfra.config.loader import load_settings
from pyinfra.monitor.prometheus import (
    make_prometheus_processing_time_decorator_from_settings,
    add_prometheus_endpoint,
)
from pyinfra.queue.callback import make_payload_processor
from pyinfra.queue.manager import QueueManager
from pyinfra.webserver.utils import create_webserver_thread_from_settings

logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)

settings = load_settings()

@make_prometheus_processing_time_decorator_from_settings(settings)
def json_processor_mock(_data: dict, _message: dict) -> dict:
    # Mock processor: sleep to emulate work, then return a fixed result payload.
    time.sleep(5)
    return {"result1": "result1"}

def main():
    app = FastAPI()
    app = add_prometheus_endpoint(app)

    queue_manager = QueueManager(settings)

    # Report queue readiness on both /ready and /health.
    @app.get("/ready")
    @app.get("/health")
    def check_health():
        return queue_manager.is_ready()

    # Serve the FastAPI app from a background thread, then start consuming
    # queue messages with the decorated mock processor as the callback.
    webserver_thread = create_webserver_thread_from_settings(app, settings)
    webserver_thread.start()
    callback = make_payload_processor(json_processor_mock, settings)
    queue_manager.start_consuming(callback)

if __name__ == "__main__":
    main()