# fp/apps/base/windmill/windmill.yaml

---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
  name: bitnami
  namespace: futureporn
spec:
  type: "oci"
  interval: 24h
  url: oci://registry-1.docker.io/bitnamicharts
---
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: postgresql
  namespace: futureporn
spec:
  interval: 24h
  chart:
    spec:
      chart: postgresql
      version: "12.3.1"
      sourceRef:
        kind: HelmRepository
        name: bitnami
  values:
    postgresql:
      fullnameOverride: windmill-postgresql-mine
      primary:
        persistence:
          # It would be $0.20/mo cheaper to use 8Gi NVMe,
          # but it's not available in Vultr's DFW datacenter,
          # so instead we use the minimum HDD size, 40Gi
          storageClass: "vultr-block-storage-hdd"
          size: 40Gi
        resources:
          limits:
            cpu: "1"
            memory: "2Gi"
          requests:
            cpu: "250m"
            memory: "1024Mi"
      auth:
        postgresPassword: windmill
        database: windmill
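      # NOTE (assumption about the bitnami chart's naming): the primary Service created by this
      # release is named after fullnameOverride, and that service name is what the hostname in
      # the windmill release's databaseUrl below must resolve to within the futureporn namespace.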
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
  name: windmill
  namespace: futureporn
spec:
  interval: 10m
  url: https://windmill-labs.github.io/windmill-helm-charts
---
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: windmill
  namespace: futureporn
spec:
  interval: 60m
  chart:
    spec:
      chart: windmill
      version: "2.0.170"
      sourceRef:
        kind: HelmRepository
        name: windmill
  values:
    # disable the chart's bundled postgres (bitnami) subchart; we run our own PostgreSQL via the HelmRelease above
    postgresql:
      enabled: false
    windmill:
      # domain as shown in the browser; used together with `baseProtocol` to build the BASE_URL
      # environment variable in the app and worker containers, and in the ingress resource if enabled
      baseDomain: windmill2.sbtp.xyz
      baseProtocol: https
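      # For example, with the two values above the chart builds BASE_URL=https://windmill2.sbtp.xyz
      # for the app and worker containers, and uses that host in the generated Ingress.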
      # postgres URI; pods will crashloop if the database is unreachable.
      # Sets the DATABASE_URL environment variable in the app and worker containers.
      databaseUrl: valueFrom:postgres://postgres:windmill@windmill-postgresql/windmill?sslmode=disable
      # replicas for the application (app) pods
      appReplicas: 2
      # replicas for the LSP containers used by the web editor
      lspReplicas: 2
      workerGroups:
        # The default worker group executes jobs with any tags except the native ones.
        # Windmill has a default worker group configuration for it.
        - name: "default"
          replicas: 3
          # -- Annotations to apply to the pods
          annotations: {}
          # -- Labels to apply to the pods
          labels: {}
          # -- Node selector to use for scheduling the pods
          nodeSelector: {}
          # -- Tolerations to apply to the pods
          tolerations: []
          # -- Affinity rules to apply to the pods
          affinity: {}
          # -- Resource limits and requests for the pods
          resources:
            requests:
              memory: "1028Mi"
              cpu: "500m"
            limits:
              memory: "2048Mi"
              cpu: "1000m"
          # -- Extra environment variables to apply to the pods
          extraEnv: []
          # -- Extra sidecar containers
          extraContainers: []
          # -- Mode for workers; defaults to "worker", the alternative "agent" requires an Enterprise license
          mode: "worker"
        # The native worker group will only execute native jobs.
        # Windmill has a default worker group configuration for it.
        - name: "native"
          replicas: 4
          # -- Resource limits and requests for the pods
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "200m"
          # -- Extra environment variables to apply to the pods
          extraEnv: []
          # -- Extra sidecar containers
          extraContainers: []
          # -- Mode for workers; defaults to "worker", the alternative "agent" requires an Enterprise license
          mode: "worker"
        - name: "gpu"
          replicas: 0
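          # The "gpu" group is kept at 0 replicas as a placeholder. A commented sketch of how it
          # might be filled in when enabled, using the same fields as the "default" group above.
          # The node label, taint, and GPU resource name below are assumptions about the cluster,
          # not values taken from this repo:
          #   replicas: 1
          #   nodeSelector:
          #     gpu: "true"
          #   tolerations:
          #     - key: "nvidia.com/gpu"
          #       operator: "Exists"
          #       effect: "NoSchedule"
          #   resources:
          #     limits:
          #       nvidia.com/gpu: 1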
      # Use these to override the tag or image used for the app and worker containers.
      # Windmill uses the same image for both. By default, if enterprise is enabled, the image is
      # set to ghcr.io/windmill-labs/windmill-ee, otherwise it is set to ghcr.io/windmill-labs/windmill.
      #tag: "mytag"
      #image: "ghcr.io/windmill-labs/windmill"
    # enable minio (bitnami) on kubernetes
    minio:
      enabled: false
    ingress:
      enabled: true
      className: nginx
      annotations:
        kubernetes.io/ingress.class: "nginx"
        cert-manager.io/cluster-issuer: "letsencrypt"
      tls:
        - secretName: windmill-tls
          hosts:
            - windmill2.sbtp.xyz
    enterprise:
      enabled: false
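# Once Flux has reconciled this file, the sources and releases can be inspected with the Flux CLI
# (assuming the flux CLI and kubectl are installed and pointed at this cluster):
#   flux get sources helm -n futureporn
#   flux get helmreleases -n futureporn
#   kubectl -n futureporn get pods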