Introduces new payload-parsing logic so the service can process absolute file paths. The queue message is expected to contain the keys "targetFilePath" and "responseFilePath". For backward compatibility, legacy messages carrying "dossierId" and "fileId" are still supported.
107 lines
4.6 KiB
Python
from os import environ
|
|
from typing import Union
|
|
|
|
from pyinfra.utils.url_parsing import validate_and_parse_s3_endpoint
|
|
|
|
|
|
def read_from_environment(environment_variable_name, default_value):
    """Look up *environment_variable_name* in the process environment.

    Returns the variable's value when set, otherwise *default_value*.
    """
    value = environ.get(environment_variable_name, default_value)
    return value
|
|
|
|
|
|
def normalize_bool(value: Union[str, bool]) -> bool:
    """Coerce an environment-style value to a real boolean.

    Booleans pass through unchanged. Strings are compared case-insensitively
    (with surrounding whitespace ignored) against "true", so "True", "true",
    "TRUE" and " true " all yield True; anything else yields False.
    The previous exact-match check (value in ["True", "true"]) remains
    backward compatible: both of those inputs still return True.
    """
    if isinstance(value, bool):
        return value
    # Generalized from an exact ["True", "true"] membership test so that
    # common env-var spellings like "TRUE" are accepted as well.
    return value.strip().lower() == "true"
|
|
|
|
|
|
class Config:
    """Service configuration assembled from environment variables.

    Every attribute is populated in ``__init__`` by reading an environment
    variable (via ``read_from_environment``) and falling back to a
    development-friendly default when the variable is unset.
    """

    def __init__(self):
        # Root logging level used by the service logger.
        self.logging_level_root = read_from_environment("LOGGING_LEVEL_ROOT", "DEBUG")

        # Toggle for Prometheus monitoring; env value is normalized to bool.
        self.monitoring_enabled = normalize_bool(read_from_environment("MONITORING_ENABLED", True))

        # Prometheus metric prefix, per convention '{product_name}_{service_name}_{parameter}'.
        # The service's results define the monitored parameter, i.e. analysis
        # result per image means processing time per image is monitored.
        # TODO: add validator since some characters like '-' are not allowed by python prometheus
        self.prometheus_metric_prefix = read_from_environment(
            "PROMETHEUS_METRIC_PREFIX", "redactmanager_research_service_parameter"
        )

        # Bind address and port for the Prometheus webserver (not configurable via env).
        self.prometheus_host = "0.0.0.0"
        self.prometheus_port = 8080

        # RabbitMQ connection settings. NOTE: the port is kept as a string,
        # matching the original behavior; consumers convert as needed.
        self.rabbitmq_host = read_from_environment("RABBITMQ_HOST", "localhost")
        self.rabbitmq_port = read_from_environment("RABBITMQ_PORT", "5672")
        self.rabbitmq_username = read_from_environment("RABBITMQ_USERNAME", "user")
        self.rabbitmq_password = read_from_environment("RABBITMQ_PASSWORD", "bitnami")

        # AMQP heartbeat timeout in seconds.
        self.rabbitmq_heartbeat = int(read_from_environment("RABBITMQ_HEARTBEAT", 60))

        # AMQP connection sleep timer in seconds; lets heartbeats get through
        # while the main function runs on another thread.
        self.rabbitmq_connection_sleep = int(read_from_environment("RABBITMQ_CONNECTION_SLEEP", 5))

        # Queue names: incoming requests, outgoing responses, failed messages.
        self.request_queue = read_from_environment("REQUEST_QUEUE", "request_queue")
        self.response_queue = read_from_environment("RESPONSE_QUEUE", "response_queue")
        self.dead_letter_queue = read_from_environment("DEAD_LETTER_QUEUE", "dead_letter_queue")

        # Storage backend selection, one of {s3, azure}.
        self.storage_backend = read_from_environment("STORAGE_BACKEND", "s3")

        # Bucket (s3) or container (azure) that queue-requested files live in.
        # The env var consulted depends on the selected backend.
        self.storage_bucket = (
            read_from_environment("STORAGE_BUCKET_NAME", "redaction")
            if self.storage_backend == "s3"
            else read_from_environment("STORAGE_AZURECONTAINERNAME", "redaction")
        )

        # S3 endpoint: the raw address is split into a TLS flag and host part.
        storage_address = read_from_environment("STORAGE_ENDPOINT", "http://127.0.0.1:9000")
        self.storage_secure_connection, self.storage_endpoint = validate_and_parse_s3_endpoint(storage_address)

        # S3 credentials and region.
        self.storage_key = read_from_environment("STORAGE_KEY", "root")
        self.storage_secret = read_from_environment("STORAGE_SECRET", "password")
        self.storage_region = read_from_environment("STORAGE_REGION", "eu-central-1")

        # Connection string for Azure storage.
        self.storage_azureconnectionstring = read_from_environment(
            "STORAGE_AZURECONNECTIONSTRING",
            "DefaultEndpointsProtocol=...",
        )

        # File and compression types the service will download/upload/process.
        self.allowed_file_types = ["json", "pdf"]
        self.allowed_compression_types = ["gz"]

        # Processing parameters accepted in queue requests.
        self.allowed_processing_parameters = ["operation"]

        # x-tenant-endpoint config used to obtain per-tenant storage
        # connection information.
        self.tenant_decryption_public_key = read_from_environment("TENANT_PUBLIC_KEY", "redaction")
        self.tenant_endpoint = read_from_environment("TENANT_ENDPOINT", "http://tenant-user-management:8081/internal-api/tenants")

        # Whether a consumer token should be written to a file.
        # NOTE(review): stored as the raw string (e.g. "False"), not passed
        # through normalize_bool like MONITORING_ENABLED — confirm callers
        # expect a string before unifying.
        self.write_consumer_token = read_from_environment("WRITE_CONSUMER_TOKEN", "False")
|
|
|
|
|
|
def get_config() -> Config:
    """Construct and return a new Config read from the current environment."""
    config = Config()
    return config
|