## Tiltfile for working with Futureporn cluster
## remote development settings
# allow_k8s_contexts('vke-e01a95c7-aa18-45a9-b8c2-ca36b6bb33f3')
# default_registry('ttl.sh/cjfuturepornnet-98ajf9iwejf9iupawh4efu8hawe')
## don't scrub secrets so we can more easily debug
secret_settings(
    disable_scrub=True
)
## @warning Don't use kubefwd; it's unreliable. In my testing, it deleted important /etc/hosts entries.
## Our workaround for SSL in dev is a VPS running caddy and chisel.
## Caddy handles SSL certs/termination, and chisel proxies into our cluster.
## This means cert-manager is only used in the staging and production environments (not development).
## It also means Gateway and HTTPRoute are only used in staging and production.
## @todo Caddy/Chisel is not ideal since that setup lives outside the monorepo. For now it's the best solution because it unblocks our DX.
##
## @see https://blog.tilt.dev/2021/09/09/kubefwd-operator.html
# v1alpha1.extension_repo(name='default', url='https://github.com/tilt-dev/tilt-extensions')
# v1alpha1.extension(name='kubefwd:config', repo_name='default', repo_path='kubefwd')
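## For reference, the Caddy/chisel VPS side looks roughly like this.
## This is a sketch, not authoritative; hostnames, ports, and credentials are placeholders.
## Caddyfile on the VPS (terminates TLS, proxies to the chisel reverse tunnel):
#   next.fp.sbtp.xyz {
#       reverse_proxy localhost:8080
#   }
## chisel server on the VPS:
#   chisel server --reverse --port 9090 --auth 'user:pass'
## chisel client in-cluster (this is what chisel-operator manages for us; see its install further down):
#   chisel client --auth 'user:pass' https://vps.example.com:9090 R:8080:traefik.futureporn.svc.cluster.local:80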
## helm_remote and deploy_cert_manager are BANNED because they use too much bandwidth and crash my computer
##
## cert-manager slows down Tilt updates so I prefer to keep it commented unless I specifically need to test certs
## cert-manager loaded using this extension is PAINFULLY SLOW, and it must re-install and re-test every time the Tiltfile changes.
## additionally, it is SYNCHRONOUS, which means nothing else can update until cert-manager is updated. @see https://github.com/tilt-dev/tilt-extensions/pull/90#issuecomment-704381205
## TL;DR: This is convenient, but it's much faster to use a helm chart for working with cert-manager.
# load('ext://cert_manager', 'deploy_cert_manager')
# deploy_cert_manager(
#     load_to_kind=True,
#     version='v1.15.1',
# )
# load('ext://helm_remote', 'helm_remote')
load('ext://dotenv', 'dotenv')
dotenv(fn='.env.development')
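## .env.development supplies any values read via os.getenv() in this file.
## Example contents (placeholder values; the ngrok keys are only needed if the
## commented-out ngrok blocks below are re-enabled):
# NGROK_API_KEY=xxxxxxxxxxxxxxxx
# NGROK_AUTHTOKEN=xxxxxxxxxxxxxxxx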
## Right now we use Tilt/Helm in dev and Flux/Kustomize/Helm in production.
## Using the same deployment/templating tool in development as in production would vastly simplify deployment, but for now that's a pipe dream.
## We can't use Flux in development unless we figure out a way for Flux/Kustomize to reference our fp Helm chart as a relative directory.
## Right now, production references gitea.futureporn.net (kind: GitRepository), from which the fp Helm chart is downloaded.
## We could possibly rewrite our fp Helm chart as a Kustomization and then deprecate Helm in development.
## k8s_yaml(kustomize('./flux/apps/development'))
## We are constrained to the CrossNamespaceObjectReference kind list.
## @see https://fluxcd.io/flux/components/helm/api/v2/#helm.toolkit.fluxcd.io/v2.CrossNamespaceObjectReference
## @see https://github.com/fluxcd/helm-controller/blob/c8ae4b6ad225d37b19bacb634db784d6096908ac/api/v2beta2/reference_types.go#L53
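## For reference, the production side described above looks roughly like this.
## This is a sketch from memory, not our actual manifests; names, paths, and intervals are assumptions.
# apiVersion: source.toolkit.fluxcd.io/v1
# kind: GitRepository
# metadata:
#   name: futureporn
#   namespace: flux-system
# spec:
#   interval: 5m
#   url: https://gitea.futureporn.net/futureporn/fp.git
#   ref:
#     branch: main
# ---
# apiVersion: helm.toolkit.fluxcd.io/v2
# kind: HelmRelease
# metadata:
#   name: fp
#   namespace: futureporn
# spec:
#   interval: 5m
#   chart:
#     spec:
#       chart: ./charts/fp
#       sourceRef:
#         kind: GitRepository
#         name: futureporn
#         namespace: flux-system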
# helm_remote(
#     'velero',
#     repo_name='velero',
#     repo_url='https://vmware-tanzu.github.io/helm-charts',
#     namespace='futureporn',
#     version='6.6.0',
#     set=[
#         'configuration.backupStorageLocation[0].name=dev',
#         'configuration.backupStorageLocation[0].provider=aws',
#         'configuration.backupStorageLocation[0].bucket=futureporn-db-backup-dev',
#         'configuration.backupStorageLocation[0].config.region=us-west-000',
#         'configuration.backupStorageLocation[0].config.s3ForcePathStyle=true',
#         'configuration.backupStorageLocation[0].config.s3Url=https://s3.us-west-000.backblazeb2.com',
#         'credentials.secretContents=cloud\n[default]\naws_access_key_id=AAAA\naws_secret_access_key=BBBB',
#         'snapshotsEnabled=false',
#         # --set configuration.backupStorageLocation[0].name=<BACKUP STORAGE LOCATION NAME> \
#         # --set configuration.backupStorageLocation[0].provider=<PROVIDER NAME> \
#         # --set configuration.backupStorageLocation[0].bucket=<BUCKET NAME> \
#         # --set configuration.backupStorageLocation[0].config.region=<REGION> \
#         # --set configuration.volumeSnapshotLocation[0].name=<VOLUME SNAPSHOT LOCATION NAME> \
#         # --set configuration.volumeSnapshotLocation[0].provider=<PROVIDER NAME> \
#         # --set configuration.volumeSnapshotLocation[0].config.region=<REGION> \
#         # --set initContainers[0].name=velero-plugin-for-<PROVIDER NAME> \
#         # --set initContainers[0].image=velero/velero-plugin-for-<PROVIDER NAME>:<PROVIDER PLUGIN TAG> \
#         # --set initContainers[0].volumeMounts[0].mountPath=/target \
#         # --set initContainers[0].volumeMounts[0].name=plugins
#     ]
# )
# helm_remote(
#     'nitter',
#     repo_name='truecharts',
#     repo_url='https://charts.truecharts.org',
#     namespace='futureporn',
#     version='7.1.4',
# )
# helm_remote(
#     'kubernetes-ingress-controller',
#     repo_name='kubernetes-ingress-controller',
#     repo_url='https://ngrok.github.io/kubernetes-ingress-controller',
#     namespace='futureporn',
#     create_namespace='false',
#     set=[
#         'credentials.apiKey=%s' % os.getenv('NGROK_API_KEY'),
#         'credentials.authtoken=%s' % os.getenv('NGROK_AUTHTOKEN')
#     ]
# )
# k8s_yaml(helm(
#     './charts/nitter',
#     values=['./charts/nitter/values.yaml'],
# ))
k8s_yaml(helm(
    './charts/traefik/traefik',
    namespace='futureporn',
    values=[
        './charts/traefik/values-overrides.yaml'
    ]
))
k8s_yaml(helm(
    './charts/fp',
    values=['./charts/fp/values.yaml'],
))
## We use a local helm chart instead of helm_remote because that command makes Tilt builds hella slow.
## To download this chart, we used the following commands.
## Re-pull periodically to keep things up-to-date.
##
# helm repo add bitnami https://charts.bitnami.com/bitnami
# helm pull bitnami/postgresql --untar --destination ./charts/postgresql
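## The same pattern applies to the other charts vendored under ./charts/,
## e.g. (repo URLs per each project's docs):
# helm repo add traefik https://traefik.github.io/charts
# helm pull traefik/traefik --untar --destination ./charts/traefik
# helm repo add jetstack https://charts.jetstack.io
# helm pull jetstack/cert-manager --untar --destination ./charts/cert-manager
# helm repo add vmware-tanzu https://vmware-tanzu.github.io/helm-charts
# helm pull vmware-tanzu/velero --untar --destination ./charts/velero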
k8s_yaml(helm(
    './charts/postgresql/postgresql',
    namespace='futureporn',
    values=[
        './charts/postgresql/values-overrides.yaml'
    ]
))
k8s_yaml(helm(
    './charts/velero/velero',
    namespace='velero',
    values=[
        './charts/velero/values.yaml'
    ]
))
# k8s_yaml(helm(
#     './charts/drupal/drupal',
#     namespace='futureporn',
#     values=[
#         './charts/drupal/values-overrides.yaml'
#     ]
# ))
# k8s_yaml(helm(
#     './charts/phpmyadmin/phpmyadmin',
#     namespace='futureporn',
#     values=[
#         './charts/phpmyadmin/values-overrides.yaml'
#     ]
# ))
# k8s_yaml(helm(
#     './charts/mariadb/mariadb',
#     namespace='futureporn',
#     values=[
#         './charts/mariadb/values-overrides.yaml'
#     ]
# ))
# k8s_yaml(helm(
#     './charts/external-secrets/external-secrets',
#     namespace='futureporn',
# ))
## redis is for uppy
## Before you think of switching to valkey, dragonfly, or one of the other redis alternatives, STOP. Uppy is picky.
## I tested dragonfly, valkey, and KeyDB. Uppy's ioredis client was unable to connect ("ECONNREFUSED").
## Uppy was only happy connecting to official redis.
k8s_yaml(helm(
    './charts/redis/redis',
    namespace='futureporn',
    values=[
        './charts/redis/values-overrides.yaml'
    ]
))
k8s_yaml(helm(
    './charts/cert-manager/cert-manager',
    namespace='cert-manager',
    values=['./charts/cert-manager/values-overrides.yaml']
))
k8s_yaml(helm(
    './charts/chisel-operator/chisel-operator',
    namespace='futureporn',
    values=['./charts/chisel-operator/values-overrides.yaml']
))
k8s_resource(
    workload='chisel-operator',
    labels=['networking'],
)
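## chisel-operator (deployed above) watches for ExitNode resources pointing at our VPS.
## A minimal sketch of one (from memory of the chisel-operator docs; the apiVersion and
## field names should be verified against the operator's CRD, and the host/secret here
## are placeholders, not our real values):
# k8s_yaml(blob("""
# apiVersion: chisel-operator.io/v1
# kind: ExitNode
# metadata:
#   name: vps
#   namespace: futureporn
# spec:
#   host: "203.0.113.7"
#   port: 9090
#   auth: chisel-auth-secret
# """))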
## ngrok
# k8s_yaml(helm(
#     './charts/kubernetes-ingress-controller/kubernetes-ingress-controller',
#     namespace='futureporn',
#     values=['./charts/kubernetes-ingress-controller/values-overrides.yaml']
# ))
# docker_build('fp/link2cid', './packages/link2cid')
docker_build(
    'fp/bot',
    '.',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './services/bot',
        './packages/types',
        './packages/utils',
        './packages/fetchers',
    ],
    dockerfile='./dockerfiles/bot.dockerfile',
    target='dev',
    live_update=[
        sync('./services/bot', '/app/services/bot')
    ]
)
docker_build(
    'fp/scout',
    '.',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './packages/types',
        './packages/utils',
        './packages/fetchers',
        './services/scout',
    ],
    dockerfile='./dockerfiles/scout.dockerfile',
    target='dev',
    # target='prod',
    live_update=[
        sync('./services/scout', '/app/services/scout')
    ]
)
load('ext://uibutton', 'cmd_button')
cmd_button('supertokens:seed',
    argv=['./scripts/supertokens-seed.sh'],
    resource='supertokens',
    icon_name='start',
    text='create supertokens database',
)
cmd_button('postgres:restore',
    argv=['./scripts/postgres-restore.sh'],
    resource='postgresql-primary',
    icon_name='upload',
    text='restore db from backup',
)
cmd_button('postgres:drop',
    argv=['sh', './scripts/postgres-drop.sh'],
    resource='postgresql-primary',
    icon_name='delete',
    text='DROP all databases'
)
cmd_button('postgres:refresh',
    argv=['echo', '@todo please restart postgrest container manually.'],
    resource='migrations',
    icon_name='refresh',
    text='Refresh schema cache'
)
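## A likely replacement for the echo above: PostgREST reloads its schema cache when it
## receives `NOTIFY pgrst, 'reload schema'` on the database. Untested sketch; the exec
## target, namespace, and psql auth are assumptions about our bitnami postgresql install:
# cmd_button('postgres:reload-schema-cache',
#     argv=['kubectl', 'exec', '-n', 'futureporn', 'statefulset/postgresql-primary', '--',
#           'psql', '-U', 'postgres', '-c', "NOTIFY pgrst, 'reload schema'"],
#     resource='postgrest',
#     icon_name='refresh',
#     text='Reload PostgREST schema cache',
# )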
## @todo let's make this get a random room from scout, then use that room to record via POST /recordings (a rough sketch follows the button below).
cmd_button('capture-worker:create',
    argv=['./scripts/capture-integration.sh'],
    resource='capture-worker',
    icon_name='send',
    text='Recording Integration Test'
)
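## A rough shape for the @todo above. Sketch only: scout's route and the /recordings
## payload are assumptions, not a confirmed API; the ports come from the (commented)
## port_forwards on the scout and postgrest resources below.
# ROOM_URL=$(curl -s http://scout.futureporn.svc.cluster.local:8134/rooms/random | jq -r '.url')
# curl -s -X POST http://postgrest.futureporn.svc.cluster.local:9000/recordings \
#     -H 'Content-Type: application/json' \
#     -d "{\"url\": \"${ROOM_URL}\"}"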
# cmd_button('drupal:init',
#     argv=['./scripts/drupal-init-wrapper.sh'],
#     resource='drupal',
#     icon_name='send',
#     text='Initialize Drupal'
# )
cmd_button('postgres:migrate',
    argv=['./scripts/postgres-migrations.sh'],
    resource='postgresql-primary',
    icon_name='directions_run',
    text='Run migrations',
)
cmd_button('pgadmin4:restore',
    argv=['./scripts/pgadmin-import-connection.sh'],
    resource='pgadmin4',
    icon_name='hub',
    text='import connection',
)
cmd_button('build:test',
    argv=['./scripts/build-test.sh'],
    resource='build',
    icon_name='build',
    text='test',
)
## we ignore unused image warnings because we do actually use this image:
## instead of being invoked by helm, we start a container from it manually via the Tilt UI.
# update_settings(suppress_unused_image_warnings=["fp/migrations"])
docker_build(
    'fp/migrations',
    '.',
    dockerfile='dockerfiles/migrations.dockerfile',
    target='migrations',
    pull=False,
)
## fp/next is built in dev mode with live_update;
## this is useful for changing the UI and seeing results immediately.
docker_build(
    'fp/next',
    '.',
    dockerfile='dockerfiles/next.dockerfile',
    target='dev',
    live_update=[
        sync('./services/next', '/app/services/next')
    ],
    pull=False,
)
docker_build(
    'fp/factory',
    '.',
    dockerfile='./dockerfiles/factory.dockerfile',
    target='dev',
    live_update=[
        sync('./services/factory', '/app/services/factory')
    ],
    pull=False,
)
# docker_build(
#     'fp/mailbox',
#     '.',
#     dockerfile='dockerfiles/mailbox.dockerfile',
#     target='mailbox',
#     only=[
#         './.npmrc',
#         './package.json',
#         './pnpm-lock.yaml',
#         './pnpm-workspace.yaml',
#         './services/mailbox',
#         './packages/types',
#         './packages/utils',
#         './packages/fetchers',
#         './packages/video',
#         './packages/storage',
#     ],
#     live_update=[
#         sync('./services/mailbox', '/app'),
#         run('cd /app && pnpm i', trigger=['./services/mailbox/package.json', './services/mailbox/pnpm-lock.yaml']),
#     ],
#     pull=False,
#     # entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts'
# )
docker_build(
    'fp/capture',
    '.',
    dockerfile='dockerfiles/capture.dockerfile',
    target='dev',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './packages/types',
        './packages/utils',
        './packages/fetchers',
        './services/capture',
    ],
    live_update=[
        sync('./services/capture', '/app/services/capture'),
    ],
    pull=False,
)
k8s_resource(
    workload='scout',
    resource_deps=['postgresql-primary'],
    # port_forwards=['8134'],
    labels=['backend'],
)
k8s_resource(
    workload='uppy',
    links=[
        link('https://uppy.fp.sbtp.xyz'),
    ],
    resource_deps=['redis-master'],
    labels=['backend'],
)
k8s_resource(
    workload='next',
    links=[
        link('https://next.fp.sbtp.xyz'),
        link('https://next.fp.sbtp.xyz/api/auth/dashboard'),
    ],
    resource_deps=['postgrest', 'postgresql-primary'],
    labels=['frontend'],
)
## whoami is for testing routing
k8s_resource(
    workload='whoami',
    labels=['frontend'],
    links=[
        link('https://whoami.fp.sbtp.xyz/')
    ]
)
k8s_resource(
    workload='postgresql-primary',
    # port_forwards=['5432'],
    labels=['database'],
)
k8s_resource(
    workload='postgresql-read',
    labels=['database']
)
# k8s_resource(
#     workload='mariadb',
#     labels=['database']
# )
# k8s_resource(
#     workload='drupal',
#     resource_deps=['mariadb'],
#     labels=['backend'],
#     port_forwards=['9797:8080'],
#     links=[
#         link('https://drupal.fp.sbtp.xyz'),
#     ],
# )
k8s_resource(
    workload='chart-velero',
    resource_deps=['postgresql-primary'],
    labels=['backend'],
)
k8s_resource(
    workload='chart-velero-upgrade-crds',
    resource_deps=['postgresql-primary'],
    labels=['backend'],
)
# k8s_resource(
#     workload='logto',
#     port_forwards=['3001', '3002'],
#     links=[
#         link('https://logto.fp.sbtp.xyz'),
#         link('https://logto-admin.fp.sbtp.xyz'),
#     ],
#     labels=['backend'],
# )
# k8s_resource(
#     workload='logto-database-seed',
#     labels=['database'],
# )
# k8s_resource(
#     workload='phpmyadmin',
#     port_forwards=['5151:8080'],
#     labels=['database'],
# )
k8s_resource(
    workload='supertokens',
    links=[
        link('https://supertokens.fp.sbtp.xyz'),
    ],
    labels=['backend'],
)
# k8s_resource(
#     workload='mailbox',
#     resource_deps=['postgresql-primary', 'postgrest'],
#     labels=['backend'],
# )
# k8s_resource(
#     workload='nitter',
#     port_forwards=['6060:10606'],
# )
## temporarily disabled to save CPU resources
# helm_remote(
#     'kube-prometheus-stack',
#     repo_name='kube-prometheus-stack',
#     repo_url='https://prometheus-community.github.io/helm-charts',
#     namespace='futureporn',
#     version='61.1.1',
#     set=[
#         'prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName=vultr-block-storage',
#         'admin.existingSecret=grafana',
#         'sidecar.dashboards.enabled=true',
#         'grafana.admin.existingSecret=grafana',
#         'grafana.sidecar.dashboards.enabled=true',
#         'grafana.sidecar.dashboards.defaultFolderName=balls',
#         'grafana.sidecar.dashboards.label=grafana_dashboard',
#         'grafana.sidecar.dashboards.provider.foldersFromFileStructure=true'
#     ]
# )
k8s_resource(
    workload='external-dns',
    labels=['networking'],
)
k8s_resource(
    workload='cert-manager-webhook-exoscale',
    labels=['networking'],
)
k8s_resource(
    workload='factory',
    labels=['backend'],
)
k8s_resource(
    workload='redis-master',
    labels=['cache']
)
k8s_resource(
    workload='bot',
    labels=['backend'],
    resource_deps=['postgrest'],
)
k8s_resource(
    workload='capture-worker',
    labels=['backend'],
    resource_deps=['postgrest', 'postgresql-primary'],
)
# k8s_resource(
#     workload='chihaya',
#     labels=['backend']
# )
k8s_resource(
    workload='postgrest',
    # port_forwards=['9000'],
    labels=['database'],
    links=[
        link('https://postgrest.fp.sbtp.xyz'),
    ],
    resource_deps=['postgresql-primary'],
)
k8s_resource(
    workload='traefik',
    links=[
        link('https://traefik.fp.sbtp.xyz/whoami'),
        link('https://traefik.fp.sbtp.xyz/postgrest'),
    ],
    labels=['networking'],
)
k8s_resource(
    workload='pgadmin4',
    # port_forwards=['5050:80'],
    labels=['database'],
)
k8s_resource(
    workload='migrations',
    labels=['database'],
    resource_deps=['postgresql-primary'],
)
k8s_resource(
    workload='cert-manager',
    labels=['certificates'],
)
k8s_resource(
    workload='cert-manager-cainjector',
    labels=['certificates'],
)
k8s_resource(
    workload='cert-manager-webhook',
    labels=['certificates'],
)
k8s_resource(
    workload='cert-manager-startupapicheck',
    labels=['certificates'],
)