## Tiltfile for working with Futureporn cluster
## remote development settings
# allow_k8s_contexts('vke-e01a95c7-aa18-45a9-b8c2-ca36b6bb33f3')
# default_registry('ttl.sh/cjfuturepornnet-98ajf9iwejf9iupawh4efu8hawe')
## don't scrub secrets so we can more easily debug
secret_settings(
    disable_scrub=True
)
## helm_remote and deploy_cert_manager are BANNED because they use too much bandwidth and crash my computer.
##
## cert-manager slows down Tilt updates, so I prefer to keep it commented unless I specifically need to test certs.
## cert-manager loaded via this extension is PAINFULLY SLOW, and it must re-install and re-test every time the Tiltfile changes.
## additionally, it is SYNCHRONOUS, which means nothing else can update until cert-manager is updated. @see https://github.com/tilt-dev/tilt-extensions/pull/90#issuecomment-704381205
## TL;DR: the extension is convenient, but it's much faster to use a local helm chart for cert-manager (vendoring commands are sketched below the commented block).
# load('ext://cert_manager', 'deploy_cert_manager')
# deploy_cert_manager(
#     load_to_kind=True,
#     version='v1.15.1',
# )
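## a sketch of the vendoring commands, assuming the upstream jetstack chart repo
## (same helm pull pattern as the other local charts below):
##
## helm repo add jetstack https://charts.jetstack.io
## helm pull jetstack/cert-manager --untar --destination ./charts/cert-manager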
# load('ext://helm_remote', 'helm_remote')
load('ext://dotenv', 'dotenv')
dotenv(fn='.env.development')
# helm_remote(
#     'velero',
#     repo_name='velero',
#     repo_url='https://vmware-tanzu.github.io/helm-charts',
#     namespace='futureporn',
#     version='6.6.0',
#     set=[
#         'configuration.backupStorageLocation[0].name=dev',
#         'configuration.backupStorageLocation[0].provider=aws',
#         'configuration.backupStorageLocation[0].bucket=futureporn-db-backup-dev',
#         'configuration.backupStorageLocation[0].config.region=us-west-000',
#         'configuration.backupStorageLocation[0].config.s3ForcePathStyle=true',
#         'configuration.backupStorageLocation[0].config.s3Url=https://s3.us-west-000.backblazeb2.com',
#         'credentials.secretContents=cloud\n[default]\naws_access_key_id=AAAA\naws_secret_access_key=BBBB',
#         'snapshotsEnabled=false',
#         # --set configuration.backupStorageLocation[0].name=<BACKUP STORAGE LOCATION NAME> \
#         # --set configuration.backupStorageLocation[0].provider=<PROVIDER NAME> \
#         # --set configuration.backupStorageLocation[0].bucket=<BUCKET NAME> \
#         # --set configuration.backupStorageLocation[0].config.region=<REGION> \
#         # --set configuration.volumeSnapshotLocation[0].name=<VOLUME SNAPSHOT LOCATION NAME> \
#         # --set configuration.volumeSnapshotLocation[0].provider=<PROVIDER NAME> \
#         # --set configuration.volumeSnapshotLocation[0].config.region=<REGION> \
#         # --set initContainers[0].name=velero-plugin-for-<PROVIDER NAME> \
#         # --set initContainers[0].image=velero/velero-plugin-for-<PROVIDER NAME>:<PROVIDER PLUGIN TAG> \
#         # --set initContainers[0].volumeMounts[0].mountPath=/target \
#         # --set initContainers[0].volumeMounts[0].name=plugins
#     ]
# )
# helm_remote(
#     'nitter',
#     repo_name='truecharts',
#     repo_url='https://charts.truecharts.org',
#     namespace='futureporn',
#     version='7.1.4',
# )
# helm_remote(
#     'frp-operator',
#     repo_name='frp-operator',
#     repo_url='https://zufardhiyaulhaq.com/frp-operator/charts/releases/',
#     namespace='futureporn',
#     version='1.0.0'
# )
# helm_remote(
#     'kubernetes-ingress-controller',
#     repo_name='kubernetes-ingress-controller',
#     repo_url='https://ngrok.github.io/kubernetes-ingress-controller',
#     namespace='futureporn',
#     create_namespace='false',
#     set=[
#         'credentials.apiKey=%s' % os.getenv('NGROK_API_KEY'),
#         'credentials.authtoken=%s' % os.getenv('NGROK_AUTHTOKEN')
#     ]
# )
# k8s_yaml(helm(
#     './charts/nitter',
#     values=['./charts/nitter/values.yaml'],
# ))
k8s_yaml(helm(
    './charts/fp',
    values=['./charts/fp/values.yaml'],
))
## we use a local helm chart instead of helm_remote because that command makes the Tilt builds hella slow.
## to download this chart, we used the following commands.
## re-pull periodically to keep the chart up-to-date.
##
## helm repo add bitnami https://charts.bitnami.com/bitnami
## helm pull bitnami/postgresql --untar --destination ./charts/postgresql
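##
## the other vendored charts (velero, traefik, redis, cert-manager) follow the same
## ./charts/<name>/<name> layout and can be refreshed the same way. velero, for example
## (repo URL taken from the commented helm_remote block above):
##
## helm repo add vmware-tanzu https://vmware-tanzu.github.io/helm-charts
## helm pull vmware-tanzu/velero --untar --destination ./charts/velero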
k8s_yaml(helm(
    './charts/postgresql/postgresql',
    namespace='futureporn',
    values=[
        './charts/postgresql/values-overrides.yaml'
    ]
))
k8s_yaml(helm(
    './charts/velero/velero',
    namespace='velero',
    values=[
        './charts/velero/values.yaml'
    ]
))
# k8s_yaml(helm(
#     './charts/external-secrets/external-secrets',
#     namespace='futureporn',
# ))
k8s_yaml(helm(
    './charts/traefik/traefik',
    namespace='futureporn',
    values=[
        './charts/traefik/values-overrides.yaml'
    ]
))
## redis is for uppy.
## before you think of switching to valkey, dragonfly, or one of the other redis alternatives, STOP. Uppy is picky.
## I tested dragonfly, valkey, and KeyDB; Uppy's ioredis client could not connect to any of them ("ECONNREFUSED").
## Uppy was only happy connecting to official redis. (a connectivity-check sketch follows the chart below.)
k8s_yaml(helm(
    './charts/redis/redis',
    namespace='futureporn',
    values=[
        './charts/redis/values-overrides.yaml'
    ]
))
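## a quick way to reproduce the connectivity check above: ping redis from inside the cluster.
## disabled by default; the statefulset name is an assumption based on the redis-master resource below.
# local_resource(
#     'redis:ping',
#     'kubectl -n futureporn exec statefulset/redis-master -- redis-cli ping',
#     auto_init=False,
#     labels=['cache'],
# )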
k8s_yaml(helm(
    './charts/cert-manager/cert-manager',
    namespace='cert-manager',
    values=['./charts/cert-manager/values-overrides.yaml']
))
## the chisel-operator helm chart is not ready for general use: it contains an invalid DNS name ("v1.1") that prevents successful deployment.
## instead, we deploy it with kustomize.
## @see ./Makefile (a kustomize() sketch follows the commented block below)
# k8s_yaml(helm(
#     './charts/chisel-operator/chisel-operator',
#     namespace='futureporn',
#     values=['./charts/chisel-operator/values-overrides.yaml']
# ))
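## if the kustomize deploy ever moves into Tilt, the built-in kustomize() helper would look
## roughly like this (the manifest directory is an assumption; ./Makefile has the real invocation):
# k8s_yaml(kustomize('./chisel-operator'))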
## ngrok
# k8s_yaml(helm(
#     './charts/kubernetes-ingress-controller/kubernetes-ingress-controller',
#     namespace='futureporn',
#     values=['./charts/kubernetes-ingress-controller/values-overrides.yaml']
# ))
# docker_build('fp/link2cid', './packages/link2cid')
docker_build(
    'fp/strapi',
    '.',
    dockerfile='./dockerfiles/strapi.dockerfile',
    target='strapi',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './services/strapi',
        './packages/types',
    ],
    live_update=[
        sync('./services/strapi', '/usr/src/app')
    ],
    pull=False,
)
docker_build(
    'fp/bot',
    '.',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './services/bot',
        './packages/types',
        './packages/utils',
        './packages/fetchers',
    ],
    dockerfile='./dockerfiles/bot.dockerfile',
    target='dev',
    live_update=[
        sync('./services/bot', '/app/services/bot')
    ]
)
docker_build(
    'fp/scout',
    '.',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './packages/types',
        './packages/utils',
        './packages/fetchers',
        './services/scout',
    ],
    dockerfile='./dockerfiles/scout.dockerfile',
    target='dev',
    # target='prod',
    live_update=[
        sync('./services/scout', '/app/services/scout')
    ]
)
load('ext://uibutton', 'cmd_button')
cmd_button('postgres:restore',
    argv=['./scripts/postgres-restore.sh'],
    resource='postgresql-primary',
    icon_name='upload',
    text='restore db from backup',
)
cmd_button('postgres:drop',
    argv=['sh', './scripts/postgres-drop.sh'],
    resource='postgresql-primary',
    icon_name='delete',
    text='DROP all databases',
)
cmd_button('postgres:refresh',
    argv=['echo', '@todo please restart postgrest container manually.'],
    resource='migrations',
    icon_name='refresh',
    text='Refresh schema cache',
)
## @todo let's make this get a random room from scout, then use that room to record via POST /recordings (a sketch follows this block)
cmd_button('capture-worker:create',
    argv=['./scripts/capture-integration.sh'],
    resource='capture-worker',
    icon_name='send',
    text='Recording Integration Test',
)
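## a sketch of that flow, assuming scout exposes a random-room endpoint and recordings are
## created through postgrest (both URL paths are assumptions; the ports match the
## port_forwards declared below):
##
## room=$(curl -s http://localhost:8134/rooms/random | jq -r '.url')
## curl -s -X POST http://localhost:9000/recordings \
##     -H "Content-Type: application/json" \
##     -d "{\"url\": \"${room}\"}"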
cmd_button('postgres:migrate',
    argv=['./scripts/postgres-migrations.sh'],
    resource='postgresql-primary',
    icon_name='directions_run',
    text='Run migrations',
)
cmd_button('pgadmin4:restore',
    argv=['./scripts/pgadmin-import-connection.sh'],
    resource='pgadmin4',
    icon_name='hub',
    text='import connection',
)
cmd_button('build:test',
    argv=['./scripts/build-test.sh'],
    resource='build',
    icon_name='build',
    text='test',
)
## this image is not referenced by helm; we start a container from it manually via the Tilt UI,
## so Tilt reports it as unused. uncomment the following to suppress that warning.
# update_settings(suppress_unused_image_warnings=["fp/migrations"])
docker_build(
    'fp/migrations',
    '.',
    dockerfile='dockerfiles/migrations.dockerfile',
    target='migrations',
    pull=False,
)
## fp/next in dev mode.
## this is useful for changing the UI and seeing results as you edit.
docker_build(
    'fp/next',
    '.',
    dockerfile='dockerfiles/next.dockerfile',
    target='dev',
    build_args={
        'NEXT_PUBLIC_STRAPI_URL': 'https://strapi.fp.sbtp.xyz',
    },
    live_update=[
        sync('./services/next', '/app/services/next')
    ],
    pull=False,
)
docker_build(
    'fp/build',
    '.',
    dockerfile='./dockerfiles/build.dockerfile',
    target='dev',
    live_update=[
        sync('./services/build', '/app/services/build')
    ],
    pull=False,
)
docker_build(
    'fp/mailbox',
    '.',
    dockerfile='dockerfiles/mailbox.dockerfile',
    target='mailbox',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './services/mailbox',
        './packages/types',
        './packages/utils',
        './packages/fetchers',
        './packages/video',
        './packages/storage',
    ],
    live_update=[
        sync('./services/mailbox', '/app'),
        run('cd /app && pnpm i', trigger=['./services/mailbox/package.json', './services/mailbox/pnpm-lock.yaml']),
    ],
    pull=False,
    # entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts'
)
docker_build(
    'fp/capture',
    '.',
    dockerfile='dockerfiles/capture.dockerfile',
    target='dev',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './packages/types',
        './packages/utils',
        './packages/fetchers',
        './services/capture',
    ],
    live_update=[
        sync('./services/capture', '/app/services/capture'),
    ],
    pull=False,
)
# k8s_resource(
#     workload='kubernetes-ingress-controller-manager',
#     links=[
#         link(os.getenv('NGROK_URL'), 'Endpoint')
#     ],
#     labels='ngrok'
# )
# k8s_resource(
#     workload='frp-operator-controller-manager',
#     labels='tunnel'
# )
# k8s_resource(
#     workload='echo',
#     links=[
#         link('https://echo.fp.sbtp.xyz'),
#         link('http://echo.futureporn.svc.cluster.local:8001')
#     ],
#     labels='debug'
# )
k8s_resource(
    workload='scout',
    resource_deps=['postgresql-primary'],
    port_forwards=['8134'],
    labels=['backend'],
)
k8s_resource(
    workload='uppy',
    links=[
        link('https://uppy.fp.sbtp.xyz'),
    ],
    resource_deps=['redis-master'],
    labels=['backend'],
)
k8s_resource(
    workload='next',
    port_forwards=['3000'],
    links=[
        link('https://next.fp.sbtp.xyz'),
    ],
    resource_deps=['postgrest', 'postgresql-primary'],
    labels=['frontend'],
)
k8s_resource(
    workload='strapi',
    port_forwards=['1339'],
    links=[
        link('https://strapi.fp.sbtp.xyz/admin'),
        link('https://strapi.fp.sbtp.xyz'),
    ],
    resource_deps=['postgresql-primary'],
    labels=['backend'],
)
k8s_resource(
    workload='game-2048',
    port_forwards=['8088:80'],
    labels=['frontend'],
    links=[
        link('https://game-2048.fp.sbtp.xyz/')
    ]
)
k8s_resource(
    workload='whoami',
    labels=['frontend'],
    links=[
        link('https://whoami.fp.sbtp.xyz/')
    ]
)
k8s_resource(
    workload='postgresql-primary',
    port_forwards=['5432'],
    labels=['database'],
)
k8s_resource(
    workload='postgresql-read',
    labels=['database']
)
k8s_resource(
    workload='mailbox',
    resource_deps=['postgresql-primary', 'strapi'],
    labels=['backend'],
)
k8s_resource(
    workload='build',
    resource_deps=['postgrest'],
    labels=['backend'],
)
# k8s_resource(
#     workload='nitter',
#     port_forwards=['6060:10606'],
# )
## temporarily disabled to save CPU resources
# helm_remote(
#     'kube-prometheus-stack',
#     repo_name='kube-prometheus-stack',
#     repo_url='https://prometheus-community.github.io/helm-charts',
#     namespace='futureporn',
#     version='61.1.1',
#     set=[
#         'prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName=vultr-block-storage',
#         'admin.existingSecret=grafana',
#         'sidecar.dashboards.enabled=true',
#         'grafana.admin.existingSecret=grafana',
#         'grafana.sidecar.dashboards.enabled=true',
#         'grafana.sidecar.dashboards.defaultFolderName=balls',
#         'grafana.sidecar.dashboards.label=grafana_dashboard',
#         'grafana.sidecar.dashboards.provider.foldersFromFileStructure=true'
#     ]
# )
k8s_resource(
    workload='external-dns',
    labels=['networking'],
)
k8s_resource(
    workload='cert-manager-webhook-exoscale',
    labels=['networking'],
)
# k8s_resource(
#     workload='ngrok-manager',
#     labels=['networking'],
# )
k8s_resource(
    workload='redis-master',
    labels=['cache']
)
k8s_resource(
    workload='bot',
    labels=['backend'],
    resource_deps=['postgrest'],
)
k8s_resource(
    workload='capture-worker',
    labels=['backend'],
    resource_deps=['postgrest', 'postgresql-primary'],
)
k8s_resource(
    workload='chihaya',
    labels=['backend']
)
k8s_resource(
    workload='postgrest',
    port_forwards=['9000'],
    labels=['database'],
    resource_deps=['postgresql-primary'],
)
k8s_resource(
    workload='pgadmin4',
    port_forwards=['5050:80'],
    labels=['database'],
)
k8s_resource(
    workload='migrations',
    labels=['database'],
    resource_deps=['postgresql-primary'],
)
k8s_resource(
    workload='cert-manager',
    labels=['certificates'],
)
k8s_resource(
    workload='cert-manager-cainjector',
    labels=['certificates'],
)
k8s_resource(
    workload='cert-manager-webhook',
    labels=['certificates'],
)
k8s_resource(
    workload='cert-manager-startupapicheck',
    labels=['certificates'],
)