## Tiltfile for working with Futureporn cluster

## remote development settings
# allow_k8s_contexts('vke-e01a95c7-aa18-45a9-b8c2-ca36b6bb33f3')
# default_registry('ttl.sh/cjfuturepornnet-98ajf9iwejf9iupawh4efu8hawe')

## don't scrub secrets so we can more easily debug
secret_settings(
  disable_scrub=True
)

## helm_remote and deploy_cert_manager are BANNED because they use too much bandwidth and crash my computer
##
## cert-manager slows down Tilt updates, so I prefer to keep it commented unless I specifically need to test certs.
## cert-manager loaded using this extension is PAINFULLY SLOW, and it must re-install and re-test every time the Tiltfile changes.
## additionally, it is SYNCHRONOUS, which means nothing else can update until cert-manager is updated. @see https://github.com/tilt-dev/tilt-extensions/pull/90#issuecomment-704381205
## TL;DR: This is convenient, but it's much faster to use a helm chart for working with cert-manager.
# load('ext://cert_manager', 'deploy_cert_manager')
# deploy_cert_manager(
#   load_to_kind=True,
#   version='v1.15.1',
# )
# load('ext://helm_remote', 'helm_remote')

load('ext://dotenv', 'dotenv')
dotenv(fn='.env.development')

# helm_remote(
#   'velero',
#   repo_name='velero',
#   repo_url='https://vmware-tanzu.github.io/helm-charts',
#   namespace='futureporn',
#   version='6.6.0',
#   set=[
#     'configuration.backupStorageLocation[0].name=dev',
#     'configuration.backupStorageLocation[0].provider=aws',
#     'configuration.backupStorageLocation[0].bucket=futureporn-db-backup-dev',
#     'configuration.backupStorageLocation[0].config.region=us-west-000',
#     'configuration.backupStorageLocation[0].config.s3ForcePathStyle=true',
#     'configuration.backupStorageLocation[0].config.s3Url=https://s3.us-west-000.backblazeb2.com',
#     'credentials.secretContents=cloud\n[default]\naws_access_key_id=AAAA\naws_secret_access_key=BBBB',
#     'snapshotsEnabled=false',
#     # --set configuration.backupStorageLocation[0].name= \
#     # --set configuration.backupStorageLocation[0].provider= \
#     # --set configuration.backupStorageLocation[0].bucket= \
#     # --set configuration.backupStorageLocation[0].config.region= \
#     # --set configuration.volumeSnapshotLocation[0].name= \
#     # --set configuration.volumeSnapshotLocation[0].provider= \
#     # --set configuration.volumeSnapshotLocation[0].config.region= \
#     # --set initContainers[0].name=velero-plugin-for- \
#     # --set initContainers[0].image=velero/velero-plugin-for-: \
#     # --set initContainers[0].volumeMounts[0].mountPath=/target \
#     # --set initContainers[0].volumeMounts[0].name=plugins
#   ]
# )

# helm_remote(
#   'nitter',
#   repo_name='truecharts',
#   repo_url='https://charts.truecharts.org',
#   namespace='futureporn',
#   version='7.1.4',
# )

# helm_remote(
#   'frp-operator',
#   repo_name='frp-operator',
#   repo_url='https://zufardhiyaulhaq.com/frp-operator/charts/releases/',
#   namespace='futureporn',
#   version='1.0.0'
# )

# helm_remote(
#   'kubernetes-ingress-controller',
#   repo_name='kubernetes-ingress-controller',
#   repo_url='https://ngrok.github.io/kubernetes-ingress-controller',
#   namespace='futureporn',
#   create_namespace='false',
#   set=[
#     'credentials.apiKey=%s' % os.getenv('NGROK_API_KEY'),
#     'credentials.authtoken=%s' % os.getenv('NGROK_AUTHTOKEN')
#   ]
# )

# k8s_yaml(helm(
#   './charts/nitter',
#   values=['./charts/nitter/values.yaml'],
# ))

k8s_yaml(helm(
  './charts/fp',
  values=['./charts/fp/values.yaml'],
))
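## Note: Tilt's built-in helm() also accepts a set= list, so individual chart values can be
## overridden at render time without editing values.yaml. Hedged sketch only; the key
## 'global.hostname' is illustrative and may not exist in the fp chart:
# k8s_yaml(helm(
#   './charts/fp',
#   values=['./charts/fp/values.yaml'],
#   set=['global.hostname=fp.sbtp.xyz'],
# ))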
## we are using a local helm chart instead of helm_remote because that command makes Tilt builds hella slow.
## to download this chart, we used the following commands.
## future re-pulling is needed to keep things up-to-date.
##
##   helm repo add bitnami https://charts.bitnami.com/bitnami
##   helm pull bitnami/postgresql --untar --destination ./charts/postgresql

k8s_yaml(helm(
  './charts/postgresql/postgresql',
  namespace='futureporn',
  values=[
    './charts/postgresql/values-overrides.yaml'
  ]
))

k8s_yaml(helm(
  './charts/cert-manager/cert-manager',
  namespace='cert-manager',
  values=['./charts/cert-manager/values-overrides.yaml']
))

# docker_build('fp/link2cid', './packages/link2cid')

docker_build(
  'fp/strapi',
  '.',
  dockerfile='./dockerfiles/strapi.dockerfile',
  target='strapi',
  only=[
    './.npmrc',
    './package.json',
    './pnpm-lock.yaml',
    './pnpm-workspace.yaml',
    './services/strapi',
    './packages/types',
  ],
  live_update=[
    sync('./services/strapi', '/app'),
    run('cd /app && pnpm i', trigger=['./services/strapi/package.json', './services/strapi/pnpm-lock.yaml'])
  ],
  pull=False,
)

docker_build(
  'fp/bot',
  '.',
  only=[
    './.npmrc',
    './package.json',
    './pnpm-lock.yaml',
    './pnpm-workspace.yaml',
    './services/bot',
    './packages/types',
  ],
  dockerfile='./dockerfiles/bot.dockerfile',
  target='dev',
  live_update=[
    sync('./services/bot', '/app/services/bot')
  ]
)

load('ext://uibutton', 'cmd_button')
cmd_button('postgres:create',
  argv=['./scripts/postgres-create.sh'],
  resource='postgresql-primary',
  icon_name='dataset',
  text='create (empty) databases',
)
cmd_button('postgres:restore',
  argv=['./scripts/postgres-restore.sh'],
  resource='postgresql-primary',
  icon_name='upload',
  text='restore db from backup',
)
cmd_button('postgres:drop',
  argv=['sh', './scripts/postgres-drop.sh'],
  resource='postgresql-primary',
  icon_name='delete',
  text='DROP all databases'
)
cmd_button('capture-api:create',
  argv=['http', '--ignore-stdin', 'POST', 'http://localhost:5003/api/record', "url='https://twitch.tv/ironmouse'", "channel='ironmouse'"],
  resource='capture-api',
  icon_name='send',
  text='Start Recording'
)
cmd_button('postgrest:migrate',
  argv=['./scripts/postgrest-migrations.sh'],
  resource='postgrest',
  icon_name='directions_run',
  text='Run migrations',
)
cmd_button('pgadmin4:restore',
  argv=['./scripts/pgadmin-import-connection.sh'],
  resource='pgadmin4',
  icon_name='hub',
  text='import connection',
)
cmd_button('factory:test',
  argv=['./scripts/factory-test.sh'],
  resource='factory',
  icon_name='factory',
  text='test',
)

## Uncomment the following for fp/next in dev mode
## this is useful for changing the UI and seeing results
docker_build(
  'fp/next',
  '.',
  dockerfile='dockerfiles/next.dockerfile',
  target='next',
  build_args={
    'NEXT_PUBLIC_STRAPI_URL': 'https://strapi.fp.sbtp.xyz'
  },
  live_update=[
    sync('./services/next', '/app')
  ],
  pull=False,
)

docker_build(
  'fp/factory',
  '.',
  dockerfile='./dockerfiles/factory.dockerfile',
  target='dev',
  live_update=[
    sync('./services/factory', '/app/services/factory')
  ],
  pull=False,
)

# docker_build(
#   'fp/scout',
#   '.',
#   dockerfile='dockerfiles/scout.dockerfile',
#   target='scout',
#   live_update=[
#     sync('./packages/scout', '/app'),
#     run('cd /app && pnpm i', trigger=['./packages/scout/package.json', './packages/scout/pnpm-lock.yaml']),
#   ],
#   entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts'
#   # entrypoint='pnpm tsx watch ./src/index.ts'
# )
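## The fp/bot and fp/factory builds above only sync() source, so a dependency change still
## forces a full image rebuild. If that becomes annoying, the run()-on-trigger pattern used by
## fp/strapi (and fp/mailbox below) could be applied there too. Sketch only; assumes pnpm is
## available inside the dev image:
# docker_build(
#   'fp/factory',
#   '.',
#   dockerfile='./dockerfiles/factory.dockerfile',
#   target='dev',
#   live_update=[
#     sync('./services/factory', '/app/services/factory'),
#     run('cd /app/services/factory && pnpm i',
#         trigger=['./services/factory/package.json', './services/factory/pnpm-lock.yaml']),
#   ],
#   pull=False,
# )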
docker_build(
  'fp/mailbox',
  '.',
  dockerfile='dockerfiles/mailbox.dockerfile',
  target='mailbox',
  only=[
    './.npmrc',
    './package.json',
    './pnpm-lock.yaml',
    './pnpm-workspace.yaml',
    './packages/image',
    './packages/scout',
    './services/mailbox',
    './packages/types',
    './packages/utils',
    './packages/video',
    './packages/storage',
  ],
  live_update=[
    sync('./services/mailbox', '/app'),
    run('cd /app && pnpm i', trigger=['./services/mailbox/package.json', './services/mailbox/pnpm-lock.yaml']),
  ],
  pull=False,
  # entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts'
)

# docker_build(
#   'fp/meal',
#   '.',
#   dockerfile='dockerfiles/meal.dockerfile',
#   target='meal',
#   only=[
#     './.npmrc',
#     './package.json',
#     './pnpm-lock.yaml',
#     './pnpm-workspace.yaml',
#     './packages/meal',
#     './packages/taco',
#     './packages/types',
#   ],
#   live_update=[
#     sync('./packages/meal', '/app'),
#     # run('cd /app && pnpm i', trigger=['./packages/meal/package.json', './packages/meal/pnpm-lock.yaml']),
#   ],
# )

docker_build(
  'fp/capture',
  '.',
  dockerfile='dockerfiles/capture.dockerfile',
  target='dev',
  only=[
    './.npmrc',
    './package.json',
    './pnpm-lock.yaml',
    './pnpm-workspace.yaml',
    './packages/scout',
    './packages/types',
    './packages/utils',
    './services/capture',
  ],
  live_update=[
    sync('./services/capture/dist', '/app/dist'),
  ],
  pull=False,
)

# k8s_resource(
#   workload='kubernetes-ingress-controller-manager',
#   links=[
#     link(os.getenv('NGROK_URL'), 'Endpoint')
#   ],
#   labels='ngrok'
# )

# k8s_resource(
#   workload='frp-operator-controller-manager',
#   labels='tunnel'
# )

# k8s_resource(
#   workload='echo',
#   links=[
#     link('https://echo.fp.sbtp.xyz'),
#     link('http://echo.futureporn.svc.cluster.local:8001')
#   ],
#   labels='debug'
# )

# k8s_resource(
#   workload='uppy',
#   links=[
#     link('https://uppy.fp.sbtp.xyz'),
#   ],
#   resource_deps=['redis-master'],
#   labels=['backend'],
# )

k8s_resource(
  workload='next',
  port_forwards=['3000'],
  links=[
    link('https://next.fp.sbtp.xyz'),
  ],
  resource_deps=['strapi', 'postgresql-primary'],
  labels=['frontend'],
)
k8s_resource(
  workload='strapi',
  port_forwards=['1339'],
  links=[
    link('https://strapi.fp.sbtp.xyz/admin'),
    link('https://strapi.fp.sbtp.xyz'),
  ],
  resource_deps=['postgresql-primary'],
  labels=['backend'],
)
k8s_resource(
  workload='postgresql-primary',
  port_forwards=['5432'],
  labels=['database'],
)
k8s_resource(
  workload='postgresql-read',
  labels=['database']
)
k8s_resource(
  workload='mailbox',
  resource_deps=['postgresql-primary', 'strapi'],
  labels=['backend'],
)
k8s_resource(
  workload='factory',
  resource_deps=['postgrest'],
  labels=['backend'],
)

# k8s_resource(
#   workload='nitter',
#   port_forwards=['6060:10606'],
# )

# temporarily disabled to save CPU resources
# helm_remote(
#   'kube-prometheus-stack',
#   repo_name='kube-prometheus-stack',
#   repo_url='https://prometheus-community.github.io/helm-charts',
#   namespace='futureporn',
#   version='61.1.1',
#   set=[
#     'prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName=vultr-block-storage',
#     'admin.existingSecret=grafana',
#     'sidecar.dashboards.enabled=true',
#     'grafana.admin.existingSecret=grafana',
#     'grafana.sidecar.dashboards.enabled=true',
#     'grafana.sidecar.dashboards.defaultFolderName=balls',
#     'grafana.sidecar.dashboards.label=grafana_dashboard',
#     'grafana.sidecar.dashboards.provider.foldersFromFileStructure=true'
#   ]
# )
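## Instead of commenting a chart out entirely to save CPU, an individual resource can also be
## parked so it only builds when triggered manually. auto_init and trigger_mode are standard
## k8s_resource() kwargs; the example below shows how the factory resource defined above could
## carry them (sketch, not currently enabled):
# k8s_resource(
#   workload='factory',
#   auto_init=False,
#   trigger_mode=TRIGGER_MODE_MANUAL,
#   resource_deps=['postgrest'],
#   labels=['backend'],
# )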
## redis is only here for uppy's usage.
## If we can engineer a way to delete redis, let's do it
# helm_remote(
#   'redis',
#   repo_name='redis',
#   repo_url='https://charts.bitnami.com/bitnami',
#   namespace='futureporn',
#   version='19.6.1',
#   set=[
#     'auth.existingSecret=redis',
#     'auth.existingSecretPasswordKey=password',
#     'replica.persistence.enabled=false',
#     'architecture=standalone'
#   ]
# )

k8s_resource(
  workload='external-dns',
  labels=['networking'],
)
k8s_resource(
  workload='cert-manager-webhook-exoscale',
  labels=['networking'],
)

# k8s_resource(
#   workload='redis-master',
#   labels=['backend']
# )

k8s_resource(
  workload='bot',
  labels=['backend'],
  resource_deps=['postgrest'],
)
k8s_resource(
  workload='capture-api',
  port_forwards=['5003'],
  labels=['backend'],
  resource_deps=['postgrest', 'postgresql-primary'],
)
k8s_resource(
  workload='capture-worker',
  labels=['backend'],
  resource_deps=['postgrest', 'postgresql-primary'],
)
k8s_resource(
  workload='postgrest',
  port_forwards=['9000'],
  labels=['database'],
  resource_deps=['postgresql-primary'],
)
k8s_resource(
  workload='pgadmin4',
  port_forwards=['5050:80'],
  labels=['database'],
)
k8s_resource(
  workload='cert-manager',
  labels=['certificates'],
)
k8s_resource(
  workload='cert-manager-cainjector',
  labels=['certificates'],
)
k8s_resource(
  workload='cert-manager-webhook',
  labels=['certificates'],
)
k8s_resource(
  workload='cert-manager-startupapicheck',
  labels=['certificates'],
)
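## If Tilt updates feel sluggish when cert-manager or postgresql re-deploy, the built-in
## update_settings() can raise update parallelism and the k8s apply timeout. The values below
## are illustrative, not tuned for this cluster:
# update_settings(
#   max_parallel_updates=3,
#   k8s_upsert_timeout_secs=60,
# )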