apiVersion: apps/v1
kind: Deployment
metadata:
  name: mail-classifier
  namespace: mailu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mail-classifier
  template:
    metadata:
      labels:
        app: mail-classifier
    spec:
      containers:
        - name: mail-classifier
          image: git.apps.sukany.cz/martin/mail-clasifier:latest
          imagePullPolicy: Always
          env:
            - name: IMAP_HOST
              value: "mailu-front.mailu.svc"
            - name: IMAP_PORT
              value: "993"
            - name: IMAP_USER
              valueFrom:
                secretKeyRef:
                  name: mail-classifier-secret
                  key: imap_user
            - name: IMAP_PASS
              valueFrom:
                secretKeyRef:
                  name: mail-classifier-secret
                  key: imap_pass

            # ---------- LLM backend configuration ----------
            # Backend choice: "ollama" or "openwebui"
            - name: LLM_BACKEND
              value: "openwebui"

            # If you use OpenWebUI (OpenRouter connection),
            # the k8s service "open-webui-service" listens on port 8080:
            - name: OPENWEBUI_URL
              value: "http://open-webui-service.open-webui.svc:8080"

            # User API token / service token from OpenWebUI;
            # store it in the mail-classifier-secret secret under the key "openwebui_api_key"
            - name: OPENWEBUI_API_KEY
              valueFrom:
                secretKeyRef:
                  name: mail-classifier-secret
                  key: openwebui_api_key

            # Model name in OpenWebUI (OpenRouter).
            # Use exactly the name shown in the UI, e.g. "openai/gpt-5-mini"
            - name: MODEL_NAME
              value: "openai/gpt-5-nano"

            # ---------- General configuration ----------
            - name: CHECK_INTERVAL
              value: "300"  # 5 minutes
            - name: MAX_BODY_CHARS
              value: "2000"
            - name: LOG_LEVEL
              value: "INFO"  # use DEBUG for troubleshooting

            # Timeout / retry for the LLM (shared by both backends)
            - name: LLM_TIMEOUT
              value: "120"
            - name: LLM_MAX_RETRIES
              value: "3"
          resources:
            requests:
              cpu: "100m"
              memory: "128Mi"
            limits:
              cpu: "500m"
              memory: "256Mi"
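---
# A minimal sketch of the Secret this Deployment references (not part of the
# original manifest): the key names match the secretKeyRef entries above,
# while the namespace, type, and all values are assumptions/placeholders and
# must be replaced with real credentials before applying.
apiVersion: v1
kind: Secret
metadata:
  name: mail-classifier-secret
  namespace: mailu
type: Opaque
stringData:
  imap_user: "classifier@example.com"   # placeholder IMAP account
  imap_pass: "change-me"                # placeholder IMAP password
  openwebui_api_key: "change-me"        # placeholder OpenWebUI API/service token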