# fp/t.wip.tiltfile
# Tiltfile for working with Next and Strapi locally
## cert-manager slows down Tilt updates so I prefer to keep it commented unless I specifically need to test certs
## NOTE(review): despite the comment above, cert-manager is currently enabled below — confirm this is intentional.
load('ext://cert_manager', 'deploy_cert_manager')
# Install cert-manager into the cluster; load_to_kind pre-loads its images into the local kind cluster.
deploy_cert_manager(
load_to_kind=True,
version='v1.15.1',
)
# All locally-built images are pushed through the local registry at localhost:5001.
default_registry('localhost:5001')
# helm_remote lets us install charts straight from remote Helm repositories.
load('ext://helm_remote', 'helm_remote')
# load('ext://dotenv', 'dotenv')
# dotenv(fn='.env')
# allow_k8s_contexts('vke-e41885d3-7f93-4f01-bfaa-426f20bf9f3f')
# helm_remote(
# 'velero',
# repo_name='velero',
# repo_url='https://vmware-tanzu.github.io/helm-charts',
# namespace='futureporn',
# version='6.6.0',
# set=[
# 'configuration.backupStorageLocation[0].name=dev',
# 'configuration.backupStorageLocation[0].provider=aws',
# 'configuration.backupStorageLocation[0].bucket=futureporn-db-backup-dev',
# 'configuration.backupStorageLocation[0].config.region=us-west-000',
# 'configuration.backupStorageLocation[0].config.s3ForcePathStyle=true',
# 'configuration.backupStorageLocation[0].config.s3Url=https://s3.us-west-000.backblazeb2.com',
# 'credentials.secretContents=cloud\n[default]\naws_access_key_id=AAAA\naws_secret_access_key=BBBB',
# 'snapshotsEnabled=false',
# # --set configuration.backupStorageLocation[0].name=<BACKUP STORAGE LOCATION NAME> \
# # --set configuration.backupStorageLocation[0].provider=<PROVIDER NAME> \
# # --set configuration.backupStorageLocation[0].bucket=<BUCKET NAME> \
# # --set configuration.backupStorageLocation[0].config.region=<REGION> \
# # --set configuration.volumeSnapshotLocation[0].name=<VOLUME SNAPSHOT LOCATION NAME> \
# # --set configuration.volumeSnapshotLocation[0].provider=<PROVIDER NAME> \
# # --set configuration.volumeSnapshotLocation[0].config.region=<REGION> \
# # --set initContainers[0].name=velero-plugin-for-<PROVIDER NAME> \
# # --set initContainers[0].image=velero/velero-plugin-for-<PROVIDER NAME>:<PROVIDER PLUGIN TAG> \
# # --set initContainers[0].volumeMounts[0].mountPath=/target \
# # --set initContainers[0].volumeMounts[0].name=plugins
# ]
# )
## this method results in the following error. Build Failed: Internal error occurred: failed calling webhook "webhook.cert-manager.io": failed to call webhook: Post "https://cert-manager-webhook.cert-manager.svc:443/validate?timeout=30s": service "cert-manager-webhook" not found
# helm_remote(
# 'cert-manager',
# repo_url='https://charts.jetstack.io',
# repo_name='cert-manager',
# namespace='cert-manager',
# version='1.15.1',
# set=[
# 'crds.enabled=true'
# ]
# )
# Traefik ingress controller from the upstream Helm repo.
# Anonymous usage reporting is disabled, the chart's built-in Service is turned
# off, and access/debug logging is enabled for local troubleshooting.
traefik_settings = [
    'globalArguments[0]=--global.sendanonymoususage=false',
    'service.enabled=false',
    'logs.access.enabled=true',
    'logs.general.level=DEBUG',
]
helm_remote(
    'traefik',
    repo_url='https://traefik.github.io/charts',
    repo_name='traefik',
    version='28.3.0',
    namespace='futureporn',
    set=traefik_settings,
)
# helm_remote(
# 'nitter',
# repo_name='truecharts',
# repo_url='https://charts.truecharts.org',
# namespace='futureporn',
# version='7.1.4',
# )
# helm_remote(
# 'frp-operator',
# repo_name='frp-operator',
# repo_url='https://zufardhiyaulhaq.com/frp-operator/charts/releases/',
# namespace='futureporn',
# version='1.0.0'
# )
# helm_remote(
# 'kubernetes-ingress-controller',
# repo_name='kubernetes-ingress-controller',
# repo_url='https://ngrok.github.io/kubernetes-ingress-controller',
# namespace='futureporn',
# create_namespace='false',
# set=[
# 'credentials.apiKey=%s' % os.getenv('NGROK_API_KEY'),
# 'credentials.authtoken=%s' % os.getenv('NGROK_AUTHTOKEN')
# ]
# )
# k8s_yaml(helm(
# './charts/nitter',
# values=['./charts/nitter/values.yaml'],
# ))
# Render the main fp chart with dev overrides and hand the manifests to Tilt.
fp_manifests = helm(
    './charts/fp',
    values=['./charts/fp/values-dev.yaml'],
)
k8s_yaml(fp_manifests)
# k8s_yaml(helm(
# './charts/trigger',
# set=[
# 'trigger.name=trigger',
# 'trigger.replicaCount=2',
# 'trigger.image.tag=self-host-rc.2',
# 'trigger.image.pullPolicy=IfNotPresent',
# 'trigger.env.ENCRYPTION_KEY=%s' % os.getenv('TRIGGER_ENCRYPTION_KEY'),
# 'trigger.env.MAGIC_LINK_SECRET=%s' % os.getenv('TRIGGER_MAGIC_LINK_SECRET'),
# 'trigger.env.DATABASE_URL=%s' % os.getenv('TRIGGER_DATABASE_URL'),
# 'trigger.env.LOGIN_ORIGIN=%s' % os.getenv('TRIGGER_LOGIN_ORIGIN'),
# 'trigger.env.APP_ORIGIN=%s' % os.getenv('TRIGGER_APP_ORIGIN'),
# 'trigger.env.PORT=%s' % os.getenv('TRIGGER_PORT'),
# 'trigger.env.REMIX_APP_PORT=%s' % os.getenv('TRIGGER_REMIX_APP_PORT'),
# 'trigger.env.REDIS_HOST=redis-master.futureporn.svc.cluster.local',
# 'trigger.env.REDIS_PORT=6379',
# 'trigger.ingress.nginx.enabled=false',
# 'trigger.ingress.enabled=false',
# 'postgres.enabled=false'
# ]
# ))
# k8s_resource(
# workload='trigger',
# port_forwards=['3030'],
# )
# docker_build('fp/link2cid', './packages/link2cid')
# Build the Strapi CMS image from the repo root so the Dockerfile can reach workspace files.
# NOTE(review): unlike the next/scout builds below, `only` omits ./pnpm-lock.yaml and
# ./package.json — confirm d.strapi.dockerfile does not need them in the build context.
docker_build(
'fp/strapi',
'.',
build_args={
'NODE_ENV': 'development',
},
only=['./packages/strapi'],
dockerfile='./d.strapi.dockerfile',
live_update=[
# Push source changes straight into the running container instead of rebuilding the image.
sync('./packages/strapi', '/app')
]
)
# docker_build(
# 'fp/strapi-app',
# '.',
# only=["./packages/strapi-app"],
# dockerfile='d.strapi-app.dockerfile',
# live_update=[
# sync('./packages/strapi-app', '/app')
# ]
# )
load('ext://uibutton', 'cmd_button')

# One-click helper buttons in the Tilt UI. Each entry is
# (button id, helper script, target resource, icon, button label).
_button_specs = [
    ('postgres:create', './scripts/postgres-create.sh', 'postgres', 'dataset', 'create (empty) databases'),
    ('postgres:restore', './scripts/postgres-restore.sh', 'postgres', 'upload', 'restore db from backup'),
    ('postgres:drop', './scripts/postgres-drop.sh', 'postgres', 'delete', 'DROP futureporn_db'),
    ('postgres:drop_temporal', './scripts/postgres-drop-temporal.sh', 'postgres', 'delete', 'DROP temporal'),
    ('postgres:backup', './scripts/postgres-backup.sh', 'postgres', 'download', 'backup the database'),
    # NOTE(review): the temporal chart install is commented out below — confirm the
    # 'temporal-web' resource exists before relying on this button.
    ('temporal-web:namespace', './scripts/temporal-namespaces.sh', 'temporal-web', 'badge', 'create futureporn namespace'),
]
for (button_id, script_path, target_resource, icon, label) in _button_specs:
    cmd_button(button_id,
        argv=['sh', script_path],
        resource=target_resource,
        icon_name=icon,
        text=label,
    )
## Uncomment the following for fp/next in dev mode
## this is useful for changing the UI and seeing results
# Build the Next.js frontend image (dev stage of the multi-stage Dockerfile).
# The Strapi public URL is baked in at build time via NEXT_PUBLIC_STRAPI_URL.
docker_build(
'fp/next',
'.',
only=['./pnpm-lock.yaml', './package.json', './packages/next'],
dockerfile='d.next.dockerfile',
target='dev',
build_args={
'NEXT_PUBLIC_STRAPI_URL': 'https://strapi.fp.sbtp.xyz'
},
live_update=[
# Sync UI source edits into the running container instead of rebuilding the image.
sync('./packages/next', '/app')
]
)
# Scout "manager" image. nodemon in the entrypoint restarts the node process on
# file changes, so most edits do not require a full image rebuild.
scout_manager_entrypoint = 'pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts'
scout_manager_updates = [
    # Copy scout sources into the container on change.
    sync('./packages/scout', '/app'),
    # Re-install deps only when the manifest or lockfile changes.
    run('cd /app && pnpm i', trigger=['./packages/scout/package.json', './packages/scout/pnpm-lock.yaml']),
]
docker_build(
    'fp/scout-manager',
    '.',
    dockerfile='d.scout.dockerfile',
    target='manager',
    only=['./pnpm-lock.yaml', './package.json', './packages/scout', './packages/next'],
    live_update=scout_manager_updates,
    entrypoint=scout_manager_entrypoint,
    # entrypoint='pnpm tsx watch ./src/index.ts'
)
# Scout "worker" image (Temporal worker). Shares the scout Dockerfile/context
# with the manager build above but targets the 'worker' stage.
docker_build(
'fp/scout-worker',
'.',
only=['./pnpm-lock.yaml', './package.json', './packages/scout', './packages/next'],
# ignore=['./packages/next'], # I wish I could use this ignore to ignore file changes in this dir, but that's not how it works
dockerfile='d.scout.dockerfile',
target='worker',
live_update=[
# idk if this run() is effective
# run('cd /app && pnpm i', trigger=['./packages/scout/package.json', './packages/scout/pnpm-lock.yaml']),
sync('./packages/scout', '/app'),
## this is a hack to avoid complete scout image rebuilds when src in ./packages/next is updated
## ./packages/next needs to be in the build context because scout depends on types exported from next module (a feature of pnpm workspaces)
## instead of a full rebuild, we put ./packages/next in the live_update spec so the changed files get shoved into /ignore-me
## ideally, I would like to include ./packages/next in the build context, but ignore file changes there for rebuilds.
## however, I don't think tilt has this capability.
sync('./packages/next', '/ignore-me'),
],
# this entrypoint is a godsend. It lets me restart the node app (fast) without having to rebuild the docker container (slow)
entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/temporal/worker.ts'
)
# k8s_resource(
# workload='kubernetes-ingress-controller-manager',
# links=[
# link(os.getenv('NGROK_URL'), 'Endpoint')
# ],
# labels='ngrok'
# )
# k8s_resource(
# workload='frp-operator-controller-manager',
# labels='tunnel'
# )
# Debug echo server: local port-forward on 8080 plus convenience links to the
# public dev hostname and the in-cluster service address.
echo_links = [
    link('https://echo.fp.sbtp.xyz'),
    link('http://echo.futureporn.svc.cluster.local:8001'),
]
k8s_resource(
    workload='echo',
    labels='debug',
    port_forwards=['8080'],
    links=echo_links,
)
# k8s_resource(
# workload='snake',
# port_forwards=['8080'],
# labels='debug'
# )
# k8s_resource(
# workload='game-2048',
# port_forwards=['8081:8080'],
# labels='debug'
# )
# Next.js frontend: forwarded on 3000; waits for strapi and postgres to be up
# before starting so API calls don't fail during bootstrap.
k8s_resource(
workload='next',
port_forwards=['3000'],
links=[
link('https://next.fp.sbtp.xyz'),
],
resource_deps=['strapi', 'postgres']
)
# Strapi CMS: admin panel via the local forward on 1339, plus the public dev
# URL. Depends on postgres being ready first.
strapi_links = [
    link('http://localhost:1339/admin'),
    link('https://strapi.fp.sbtp.xyz'),
]
k8s_resource(
    workload='strapi',
    resource_deps=['postgres'],
    port_forwards=['1339'],
    links=strapi_links,
)
# Postgres database, port-forwarded on 5432 for local psql/GUI access.
k8s_resource(
workload='postgres',
port_forwards=['5432']
)
# k8s_resource(
# workload='scout-worker',
# resource_deps=['postgres', 'strapi', 'temporal-frontend', 'scout-manager']
# )
# k8s_resource(
# workload='scout-manager',
# resource_deps=['postgres', 'strapi', 'temporal-frontend']
# )
# k8s_resource(
# workload='pgadmin',
# port_forwards=['5050'],
# resource_deps=['postgres']
# )
# k8s_resource(
# workload='nitter',
# port_forwards=['6060:10606'],
# )
# helm_remote(
# 'temporal',
# repo_name='temporal',
# repo_url='https://charts.lemontech.engineering',
# namespace='futureporn',
# version='0.37.0',
# set=[
# 'admintools.image.tag=1.24.1-tctl-1.18.1-cli-0.12.0',
# 'web.image.tag=2.27.2',
# 'prometheus.enabled=false',
# 'grafana.enabled=false',
# 'elasticsearch.enabled=false',
# 'web.config.auth.enabled=true',
# 'cassandra.enabled=false',
# 'server.config.persistence.default.driver=sql',
# 'server.config.persistence.default.sql.driver=postgres12',
# 'server.config.persistence.default.sql.host=%s' % os.getenv('POSTGRES_HOST'),
# 'server.config.persistence.default.sql.port=5432',
# 'server.config.persistence.default.sql.user=%s' % os.getenv('POSTGRES_USER'),
# 'server.config.persistence.default.sql.password=%s' % os.getenv('POSTGRES_PASSWORD'),
# 'server.config.persistence.visibility.driver=sql',
# 'server.config.persistence.visibility.sql.driver=postgres12',
# 'server.config.persistence.visibility.sql.host=%s' % os.getenv('POSTGRES_HOST'),
# 'server.config.persistence.visibility.sql.port=5432',
# 'server.config.persistence.visibility.sql.user=%s' % os.getenv('POSTGRES_USER'),
# 'server.config.persistence.visibility.sql.password=%s' % os.getenv('POSTGRES_PASSWORD'),
# ]
# )
# k8s_resource(
# workload='temporal-admintools',
# labels='temporal',
# resource_deps=[
# 'postgres',
# 'strapi'
# ])
# k8s_resource(
# workload='temporal-frontend',
# labels='temporal', port_forwards=['7233'],
# resource_deps=[
# 'postgres',
# 'strapi'
# ])
# k8s_resource(
# workload='temporal-history',
# labels='temporal',
# resource_deps=[
# 'postgres',
# 'strapi'
# ])
# k8s_resource(
# workload='temporal-worker',
# labels='temporal',
# resource_deps=[
# 'postgres',
# 'strapi'
# ])
# k8s_resource(
# workload='temporal-web',
# labels='temporal', port_forwards=['8080'],
# resource_deps=[
# 'postgres',
# 'strapi'
# ])
# k8s_resource(
# workload='temporal-schema-setup',
# labels='temporal',
# resource_deps=[
# 'postgres',
# 'strapi'
# ])
# k8s_resource(
# workload='temporal-schema-update',
# labels='temporal',
# resource_deps=[
# 'postgres',
# 'strapi'
# ])
# k8s_resource(
# workload='temporal-matching',
# labels='temporal',
# resource_deps=[
# 'postgres',
# 'strapi'
# ])
# k8s_resource(
# workload='cert-manager',
# labels='cert-manager'
# )
# k8s_resource(
# workload='cert-manager-webhook',
# labels='cert-manager'
# )
# k8s_resource(
# workload='cert-manager-cainjector',
# labels='cert-manager'
# )
# k8s_resource(
# workload='cert-manager-startupapicheck',
# labels='cert-manager'
# )