[asyncio]
max_concurrent_tasks = 10

[dynamic_tenant_queues]
enabled = true

[metrics.prometheus]
enabled = true
prefix = "redactmanager_cv_analysis_service"

[tracing]
enabled = true
# Possible values: "opentelemetry" | "azure_monitor" ("azure_monitor" expects the APPLICATIONINSIGHTS_CONNECTION_STRING environment variable.)
type = "azure_monitor"

[tracing.opentelemetry]
endpoint = "http://otel-collector-opentelemetry-collector.otel-collector:4318/v1/traces"
service_name = "redactmanager_cv_analysis_service"
exporter = "otlp"

[webserver]
host = "0.0.0.0"
port = 8080

[rabbitmq]
host = "localhost"
port = 5672
username = ""
password = ""
heartbeat = 60
# Has to be a divisor of heartbeat, and shouldn't be too big, since queue interactions
# (like receiving new messages) only happen at these intervals.
# This is also the minimum time the service needs to process a message.
connection_sleep = 5
input_queue = "request_queue"
output_queue = "response_queue"
dead_letter_queue = "dead_letter_queue"

tenant_event_queue_suffix = "_tenant_event_queue"
tenant_event_dlq_suffix = "_tenant_events_dlq"
tenant_exchange_name = "tenants-exchange"
queue_expiration_time = 300000 # 5 minutes in milliseconds
service_request_queue_prefix = "cv_analysis_request_queue"
service_request_exchange_name = "cv_analysis_request_exchange"
service_response_exchange_name = "cv_analysis_response_exchange"
service_dlq_name = "cv_analysis_dlq"

[storage]
backend = "s3"

[storage.s3]
bucket = "redaction"
endpoint = "http://127.0.0.1:9000"
key = ""
secret = ""
region = "eu-central-1"

[storage.azure]
container = "redaction"
connection_string = ""

[storage.tenant_server]
public_key = ""
endpoint = "http://tenant-user-management:8081/internal-api/tenants"

[kubernetes]
pod_name = "test_pod"