## Tiltfile for working with Futureporn cluster
## remote development settings
# allow_k8s_contexts('vke-e01a95c7-aa18-45a9-b8c2-ca36b6bb33f3')
# default_registry('ttl.sh/cjfuturepornnet-98ajf9iwejf9iupawh4efu8hawe')
## don't scrub secrets so we can more easily debug
secret_settings(
    disable_scrub=True
)
## cert-manager slows down Tilt updates so I prefer to keep it commented unless I specifically need to test certs
## cert-manager loaded using this extension is PAINFULLY SLOW, and it must re-install and re-test every time the Tiltfile changes.
## additionally, it is SYNCHRONOUS, which means nothing else can update until cert-manager is updated. @see https://github.com/tilt-dev/tilt-extensions/pull/90#issuecomment-704381205
## TL;DR: This is convenient, but it's much faster to use a helm chart for working with cert-manager.
# load('ext://cert_manager', 'deploy_cert_manager')
# deploy_cert_manager(
#     load_to_kind=True,
#     version='v1.15.1',
# )
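## A sketch of the faster helm chart approach mentioned above, using the helm_remote
## extension loaded below. Untested here; the jetstack repo URL and the 'crds.enabled'
## value are assumptions based on upstream cert-manager docs, not this repo.
# helm_remote(
#     'cert-manager',
#     repo_name='jetstack',
#     repo_url='https://charts.jetstack.io',
#     namespace='cert-manager',
#     version='v1.15.1',
#     set=['crds.enabled=true'],
# )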
load('ext://helm_remote', 'helm_remote')
load('ext://dotenv', 'dotenv')
dotenv(fn='.env.development')
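## .env.development is expected to supply the env vars referenced later in this file.
## A sketch of the expected keys (values elided; not an authoritative list):
# NGROK_API_KEY=...
# NGROK_AUTHTOKEN=...
# NGROK_URL=...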
# helm_remote(
#     'velero',
#     repo_name='velero',
#     repo_url='https://vmware-tanzu.github.io/helm-charts',
#     namespace='futureporn',
#     version='6.6.0',
#     set=[
#         'configuration.backupStorageLocation[0].name=dev',
#         'configuration.backupStorageLocation[0].provider=aws',
#         'configuration.backupStorageLocation[0].bucket=futureporn-db-backup-dev',
#         'configuration.backupStorageLocation[0].config.region=us-west-000',
#         'configuration.backupStorageLocation[0].config.s3ForcePathStyle=true',
#         'configuration.backupStorageLocation[0].config.s3Url=https://s3.us-west-000.backblazeb2.com',
#         'credentials.secretContents=cloud\n[default]\naws_access_key_id=AAAA\naws_secret_access_key=BBBB',
#         'snapshotsEnabled=false',
#         # --set configuration.backupStorageLocation[0].name=<BACKUP STORAGE LOCATION NAME> \
#         # --set configuration.backupStorageLocation[0].provider=<PROVIDER NAME> \
#         # --set configuration.backupStorageLocation[0].bucket=<BUCKET NAME> \
#         # --set configuration.backupStorageLocation[0].config.region=<REGION> \
#         # --set configuration.volumeSnapshotLocation[0].name=<VOLUME SNAPSHOT LOCATION NAME> \
#         # --set configuration.volumeSnapshotLocation[0].provider=<PROVIDER NAME> \
#         # --set configuration.volumeSnapshotLocation[0].config.region=<REGION> \
#         # --set initContainers[0].name=velero-plugin-for-<PROVIDER NAME> \
#         # --set initContainers[0].image=velero/velero-plugin-for-<PROVIDER NAME>:<PROVIDER PLUGIN TAG> \
#         # --set initContainers[0].volumeMounts[0].mountPath=/target \
#         # --set initContainers[0].volumeMounts[0].name=plugins
#     ]
# )
helm_remote(
    'traefik',
    repo_name='traefik',
    repo_url='https://traefik.github.io/charts',
    namespace='futureporn',
    version='28.3.0',
    set=[
        'globalArguments[0]=--global.sendanonymoususage=false',
        'service.enabled=true',
        'logs.access.enabled=true',
        'logs.access.format=json',
        'logs.general.level=DEBUG',
        'logs.general.format=json',
        'providers.kubernetesIngress.publishedService.enabled=true',
    ]
)
# helm_remote(
#     'nitter',
#     repo_name='truecharts',
#     repo_url='https://charts.truecharts.org',
#     namespace='futureporn',
#     version='7.1.4',
# )
# helm_remote(
#     'frp-operator',
#     repo_name='frp-operator',
#     repo_url='https://zufardhiyaulhaq.com/frp-operator/charts/releases/',
#     namespace='futureporn',
#     version='1.0.0'
# )
# helm_remote(
#     'kubernetes-ingress-controller',
#     repo_name='kubernetes-ingress-controller',
#     repo_url='https://ngrok.github.io/kubernetes-ingress-controller',
#     namespace='futureporn',
#     create_namespace='false',
#     set=[
#         'credentials.apiKey=%s' % os.getenv('NGROK_API_KEY'),
#         'credentials.authtoken=%s' % os.getenv('NGROK_AUTHTOKEN')
#     ]
# )
# k8s_yaml(helm(
#     './charts/nitter',
#     values=['./charts/nitter/values.yaml'],
# ))
k8s_yaml(helm(
    './charts/fp',
    values=['./charts/fp/values.yaml'],
))
# docker_build('fp/link2cid', './packages/link2cid')
docker_build(
    'fp/strapi',
    '.',
    dockerfile='./d.strapi.dockerfile',
    target='strapi',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './packages/strapi',
        './packages/types',
    ],
    live_update=[
        sync('./packages/strapi', '/app'),
        run('cd /app && pnpm i', trigger=['./packages/strapi/package.json', './packages/strapi/pnpm-lock.yaml'])
    ]
)
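## The live_update above syncs edits under ./packages/strapi into /app and re-runs
## `pnpm i` only when the package manifest or lockfile changes, so most source edits
## skip a full image rebuild.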
# docker_build(
#     'fp/bot',
#     '.',
#     only=[
#         './.npmrc',
#         './package.json',
#         './pnpm-lock.yaml',
#         './pnpm-workspace.yaml',
#         './packages/bot',
#         './packages/image',
#         './packages/scout',
#         './packages/storage',
#         './packages/workflows',
#         './packages/types',
#         './packages/utils',
#     ],
#     dockerfile='./d.bot.dockerfile',
#     target='dev',
#     live_update=[
#         sync('./packages/bot', '/app'),
#         run('cd /app && pnpm i', trigger=['./packages/bot/package.json', './packages/bot/pnpm-lock.yaml'])
#     ]
# )
load('ext://uibutton', 'cmd_button')
cmd_button('postgres:create',
    argv=['./scripts/postgres-create.sh'],
    resource='postgresql-primary',
    icon_name='dataset',
    text='create (empty) databases',
)
cmd_button('postgres:restore',
    argv=['dotenvx', 'run', '-f', '.env.development', '--', './scripts/postgres-restore.sh'],
    resource='postgresql-primary',
    icon_name='upload',
    text='restore db from backup',
)
cmd_button('postgres:drop',
    argv=['sh', './scripts/postgres-drop.sh'],
    resource='postgresql-primary',
    icon_name='delete',
    text='DROP all databases'
)
cmd_button('capture-api:create',
    argv=['http', '--ignore-stdin', 'POST', 'http://localhost:5003/api/record', "url='https://twitch.tv/ironmouse'", "channel='ironmouse'"],
    resource='capture-api',
    icon_name='send',
    text='Start Recording'
)
cmd_button('postgres:graphile',
    argv=['sh', './scripts/postgres-test-graphile.sh'],
    resource='postgresql-primary',
    icon_name='graph',
    text='create graphile test job',
)
## fp/next in dev mode (currently enabled; comment out if not iterating on the UI)
## this is useful for changing the UI and seeing results live
docker_build(
    'fp/next',
    '.',
    dockerfile='d.next.dockerfile',
    target='next',
    build_args={
        'NEXT_PUBLIC_STRAPI_URL': 'https://strapi.fp.sbtp.xyz'
    },
    live_update=[
        sync('./packages/next', '/app')
    ]
)
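## Note: Next.js inlines NEXT_PUBLIC_* variables at build time, so NEXT_PUBLIC_STRAPI_URL
## is baked into the image; changing it requires a rebuild, not just a live_update sync.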
# docker_build(
#     'fp/scout',
#     '.',
#     dockerfile='d.scout.dockerfile',
#     target='scout',
#     live_update=[
#         sync('./packages/scout', '/app'),
#         run('cd /app && pnpm i', trigger=['./packages/scout/package.json', './packages/scout/pnpm-lock.yaml']),
#     ],
#     entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts'
#     # entrypoint='pnpm tsx watch ./src/index.ts'
# )
docker_build(
    'fp/mailbox',
    '.',
    dockerfile='d.mailbox.dockerfile',
    target='mailbox',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './packages/image',
        './packages/scout',
        './packages/mailbox',
        './packages/types',
        './packages/utils',
        './packages/video',
        './packages/storage',
    ],
    live_update=[
        sync('./packages/mailbox', '/app'),
        run('cd /app && pnpm i', trigger=['./packages/mailbox/package.json', './packages/mailbox/pnpm-lock.yaml']),
    ],
    # entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts'
)
# docker_build(
#     'fp/meal',
#     '.',
#     dockerfile='d.meal.dockerfile',
#     target='meal',
#     only=[
#         './.npmrc',
#         './package.json',
#         './pnpm-lock.yaml',
#         './pnpm-workspace.yaml',
#         './packages/meal',
#         './packages/taco',
#         './packages/types',
#     ],
#     live_update=[
#         sync('./packages/meal', '/app'),
#         # run('cd /app && pnpm i', trigger=['./packages/meal/package.json', './packages/meal/pnpm-lock.yaml']),
#     ],
# )
docker_build(
    'fp/capture',
    '.',
    dockerfile='d.capture.dockerfile',
    target='capture',
    only=[
        './.npmrc',
        './package.json',
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './packages/capture',
        './packages/scout',
        './packages/types',
        './packages/utils',
        './services/capture',
    ],
    live_update=[
        sync('./packages/capture/dist', '/app/dist'),
    ]
)
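## Unlike the other services, capture only syncs its built output (./packages/capture/dist),
## so a local compile step (e.g. a tsc watch process) is presumably needed for edits to
## actually reach the running pod.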
# k8s_resource(
#     workload='kubernetes-ingress-controller-manager',
#     links=[
#         link(os.getenv('NGROK_URL'), 'Endpoint')
#     ],
#     labels='ngrok'
# )
# k8s_resource(
#     workload='frp-operator-controller-manager',
#     labels='tunnel'
# )
# k8s_resource(
#     workload='echo',
#     links=[
#         link('https://echo.fp.sbtp.xyz'),
#         link('http://echo.futureporn.svc.cluster.local:8001')
#     ],
#     labels='debug'
# )
k8s_resource(
    workload='uppy',
    links=[
        link('https://uppy.fp.sbtp.xyz'),
    ],
    resource_deps=['redis-master'],
    labels=['backend'],
)
k8s_resource(
    workload='next',
    port_forwards=['3000'],
    links=[
        link('https://next.fp.sbtp.xyz'),
    ],
    resource_deps=['strapi', 'postgresql-primary'],
    labels=['frontend'],
)
k8s_resource(
    workload='strapi',
    port_forwards=['1339'],
    links=[
        link('https://strapi.fp.sbtp.xyz/admin'),
        link('https://strapi.fp.sbtp.xyz'),
    ],
    resource_deps=['postgresql-primary'],
    labels=['backend'],
)
k8s_resource(
    workload='postgresql-primary',
    port_forwards=['5432'],
    labels=['backend'],
)
k8s_resource(
    workload='traefik',
    port_forwards=['9000:9000'],
    links=[
        link('http://localhost:9000/dashboard')
    ],
    labels=['networking'],
)
k8s_resource(
    workload='mailbox',
    resource_deps=['postgresql-primary', 'strapi'],
    labels=['backend'],
)
# k8s_resource(
#     workload='',
# )
# k8s_resource(
#     workload='pgadmin',
#     port_forwards=['5050'],
#     resource_deps=['postgresql-primary']
# )
# k8s_resource(
#     workload='nitter',
#     port_forwards=['6060:10606'],
# )
# temporarily disabled to save CPU resources
# helm_remote(
#     'kube-prometheus-stack',
#     repo_name='kube-prometheus-stack',
#     repo_url='https://prometheus-community.github.io/helm-charts',
#     namespace='futureporn',
#     version='61.1.1',
#     set=[
#         'prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName=vultr-block-storage',
#         'admin.existingSecret=grafana',
#         'sidecar.dashboards.enabled=true',
#         'grafana.admin.existingSecret=grafana',
#         'grafana.sidecar.dashboards.enabled=true',
#         'grafana.sidecar.dashboards.defaultFolderName=balls',
#         'grafana.sidecar.dashboards.label=grafana_dashboard',
#         'grafana.sidecar.dashboards.provider.foldersFromFileStructure=true'
#     ]
# )
helm_remote(
    'postgresql',
    repo_name='postgresql',
    repo_url='https://charts.bitnami.com/bitnami',
    namespace='futureporn',
    version='15.5.17',
    set=[
        'auth.enablePostgresUser=true',
        'auth.existingSecret=postgresql',
        # 'architecture=standalone',
        'architecture=replication',
        'readReplicas.replicaCount=3',
        'replication.synchronousCommit=on',
        'replication.numSynchronousReplicas=1',
        'replication.applicationName=futureporn',
        'image.debug=true',
        'auth.usePasswordFiles=true',
    ]
)
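## With synchronousCommit=on and numSynchronousReplicas=1, the primary waits for one
## replica to acknowledge each commit before returning, trading a little write latency
## for commits surviving a primary failover.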
## redis is only here for uppy's usage.
## If we can engineer a way to delete redis, let's do it
helm_remote(
    'redis',
    repo_name='redis',
    repo_url='https://charts.bitnami.com/bitnami',
    namespace='futureporn',
    version='19.6.1',
    set=[
        'auth.existingSecret=redis',
        'auth.existingSecretPasswordKey=password',
        'replica.persistence.enabled=false',
        'architecture=standalone'
    ]
)
k8s_resource(
    workload='external-dns',
    labels=['networking'],
)
k8s_resource(
    workload='cert-manager-webhook-exoscale',
    labels=['networking'],
)
k8s_resource(
    workload='redis-master',
    labels=['backend']
)
k8s_resource(
    workload='postgresql-read',
    labels=['backend']
)
# k8s_resource(
#     workload='bot',
#     labels=['backend'],
#     # resource_deps=['strapi'],
# )
k8s_resource(
    workload='capture-api',
    port_forwards=['5003'],
    labels=['backend'],
)
k8s_resource(
    workload='capture-worker',
    labels=['backend'],
)