# This is the Helmfile we use to deploy our application
helmDefaults:
  historyMax: 10
  # wait for k8s resources via --wait. (default false)
  wait: true
  # if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout (default false, Implemented in Helm3.5)
  waitForJobs: true
  # time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks, and waits on pod/pvc/svc/deployment readiness) (default 300)
  timeout: 600
  atomic: true
  cleanupOnFail: true
  # The whole args key is wrapped in the conditional so that when SKIP_SCHEMA
  # is not "true" we omit the key entirely instead of rendering "args:" with no
  # items (which parses as null). See the previously-noted quirk:
  # https://github.com/helmfile/helmfile/discussions/195
  {{ if eq (requiredEnv "SKIP_SCHEMA") "true" }}
  args:
    - --no-hooks
  {{ end }}

releases:
  # Our app.
  # The local chart (./chart) configures the main app plus the dependent
  # backup and worker workloads set up in the values overrides below.
  - name: palolo-app
    chart: ./chart
    # All templated scalars are single-quoted: the template is rendered to text
    # first, then parsed as YAML, so an unquoted expansion that looks like a
    # number/boolean (or expands empty) would silently change type.
    namespace: 'palolo-{{ requiredEnv "DEPLOY_ENVIRONMENT" }}'
    values:
      - overrides.yaml
      - 'overrides-{{ requiredEnv "DEPLOY_ENVIRONMENT" }}-{{ requiredEnv "DEPLOY_REGION" }}.yaml'
      - app:
          extraEnv:
            - name: AWS_REGION
              value: '{{ requiredEnv "DEPLOY_REGION" }}'
            - name: AWS_DEFAULT_REGION
              value: '{{ requiredEnv "DEPLOY_REGION" }}'
            - name: AWS_SECRETS_REGION
              value: us-west-2
            - name: PALOLO_ENV
              value: '{{ requiredEnv "DEPLOY_ENVIRONMENT" }}'
          ingress:
            enabled: true
            className: "alb"
            annotations:
              alb.ingress.kubernetes.io/healthcheck-path: /api/heartbeat
              # Kubernetes annotation values must be strings, hence the quoted numbers.
              alb.ingress.kubernetes.io/healthcheck-interval-seconds: '15'
              alb.ingress.kubernetes.io/healthcheck-timeout-seconds: '5'
              alb.ingress.kubernetes.io/load-balancer-name: '{{ requiredEnv "AWS_CLUSTER" }}'
              external-dns.alpha.kubernetes.io/hostname: '{{ requiredEnv "CLUSTER_ATTRIBUTE" }}-{{ requiredEnv "DEPLOY_ENVIRONMENT" }}.exhalefi.com, api-{{ requiredEnv "CLUSTER_ATTRIBUTE" }}-{{ requiredEnv "DEPLOY_ENVIRONMENT" }}.exhalefi.com'
              external-dns.alpha.kubernetes.io/ingress-hostname-source: annotation-only
            tls:
              - hosts:
                  - "*.exhalefi.com"
                secretName: palolo-com-tls
          serviceAccount:
            annotations:
              eks.amazonaws.com/role-arn: 'arn:aws:iam::{{ requiredEnv "AWS_ACCOUNT_ID" }}:role/{{ requiredEnv "AWS_CLUSTER" }}-palolo-app'
      - backup:
          extraEnv:
            - name: AWS_REGION
              value: '{{ requiredEnv "DEPLOY_REGION" }}'
            - name: AWS_DEFAULT_REGION
              value: '{{ requiredEnv "DEPLOY_REGION" }}'
            - name: AWS_SECRETS_REGION
              value: us-west-2
            - name: PALOLO_ENV
              value: '{{ requiredEnv "DEPLOY_ENVIRONMENT" }}'
          serviceAccount:
            annotations:
              eks.amazonaws.com/role-arn: 'arn:aws:iam::{{ requiredEnv "AWS_ACCOUNT_ID" }}:role/{{ requiredEnv "AWS_CLUSTER" }}-palolo-backup'
      - worker:
          extraEnv:
            - name: AWS_REGION
              value: '{{ requiredEnv "DEPLOY_REGION" }}'
            - name: AWS_DEFAULT_REGION
              value: '{{ requiredEnv "DEPLOY_REGION" }}'
            - name: AWS_SECRETS_REGION
              value: us-west-2
            - name: PALOLO_COMMAND
              value: worker
            - name: PALOLO_ENV
              value: '{{ requiredEnv "DEPLOY_ENVIRONMENT" }}'
          serviceAccount:
            annotations:
              # NOTE(review): worker reuses the -palolo-app role (backup has its
              # own -palolo-backup role) — confirm this is intentional.
              eks.amazonaws.com/role-arn: 'arn:aws:iam::{{ requiredEnv "AWS_ACCOUNT_ID" }}:role/{{ requiredEnv "AWS_CLUSTER" }}-palolo-app'
      - image:
          repository: 686817240054.dkr.ecr.us-west-2.amazonaws.com/palolo
          # Quoted: an all-digit tag (e.g. a numeric short SHA) would otherwise
          # be parsed as an integer, producing an invalid image reference.
          tag: '{{ requiredEnv "DOCKER_TAG" }}'

repositories:
  # NOTE(review): no release in this file references a bitnami chart
  # (palolo-app uses the local ./chart) — confirm this repo is still needed.
  - name: bitnami
    url: https://charts.bitnami.com/bitnami
