## Tiltfile for working with Futureporn cluster

## remote development settings
# allow_k8s_contexts('vke-e01a95c7-aa18-45a9-b8c2-ca36b6bb33f3')
# default_registry('ttl.sh/cjfuturepornnet-98ajf9iwejf9iupawh4efu8hawe')

## don't scrub secrets so we can more easily debug
secret_settings(
    disable_scrub=True
)

## cert-manager slows down Tilt updates, so I prefer to keep it commented out unless I specifically need to test certs.
## cert-manager loaded via this extension is PAINFULLY SLOW, and it must re-install and re-test every time the Tiltfile changes.
## Additionally, it is SYNCHRONOUS, which means nothing else can update until cert-manager has finished. @see https://github.com/tilt-dev/tilt-extensions/pull/90#issuecomment-704381205
## TL;DR: it's much preferred & much faster to use a helm chart for working with cert-manager in every environment.
load('ext://cert_manager', 'deploy_cert_manager')
deploy_cert_manager(
    load_to_kind=True,
    version='v1.15.1',
)
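
## A sketch of that faster helm-based alternative, kept commented so it doesn't conflict
## with deploy_cert_manager above. Assumes the official jetstack chart; on chart versions
## older than v1.15 the flag is installCRDs=true rather than crds.enabled=true.
# helm_remote(
#     'cert-manager',
#     repo_name='jetstack',
#     repo_url='https://charts.jetstack.io',
#     namespace='futureporn',
#     version='v1.15.1',
#     set=['crds.enabled=true'],
# )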

load('ext://helm_remote', 'helm_remote')

load('ext://dotenv', 'dotenv')
dotenv(fn='.env.development')
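## values from .env.development become readable via os.getenv(); e.g. the (commented)
## ngrok chart below reads NGROK_API_KEY and NGROK_AUTHTOKEN this way.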


# helm_remote(
#     'velero',
#     repo_name='velero',
#     repo_url='https://vmware-tanzu.github.io/helm-charts',
#     namespace='futureporn',
#     version='6.6.0',
#     set=[
#         'configuration.backupStorageLocation[0].name=dev',
#         'configuration.backupStorageLocation[0].provider=aws',
#         'configuration.backupStorageLocation[0].bucket=futureporn-db-backup-dev',
#         'configuration.backupStorageLocation[0].config.region=us-west-000',
#         'configuration.backupStorageLocation[0].config.s3ForcePathStyle=true',
#         'configuration.backupStorageLocation[0].config.s3Url=https://s3.us-west-000.backblazeb2.com',
#         'credentials.secretContents.cloud=[default]\naws_access_key_id=AAAA\naws_secret_access_key=BBBB',
#         'snapshotsEnabled=false',
# # --set configuration.backupStorageLocation[0].name=<BACKUP STORAGE LOCATION NAME> \
# # --set configuration.backupStorageLocation[0].provider=<PROVIDER NAME> \
# # --set configuration.backupStorageLocation[0].bucket=<BUCKET NAME> \
# # --set configuration.backupStorageLocation[0].config.region=<REGION> \
# # --set configuration.volumeSnapshotLocation[0].name=<VOLUME SNAPSHOT LOCATION NAME> \
# # --set configuration.volumeSnapshotLocation[0].provider=<PROVIDER NAME> \
# # --set configuration.volumeSnapshotLocation[0].config.region=<REGION> \
# # --set initContainers[0].name=velero-plugin-for-<PROVIDER NAME> \
# # --set initContainers[0].image=velero/velero-plugin-for-<PROVIDER NAME>:<PROVIDER PLUGIN TAG> \
# # --set initContainers[0].volumeMounts[0].mountPath=/target \
# # --set initContainers[0].volumeMounts[0].name=plugins
#     ]
# )



helm_remote(
    'traefik',
    repo_name='traefik',
    repo_url='https://traefik.github.io/charts',
    namespace='futureporn',
    version='28.3.0',
    set=[
        'globalArguments[0]=--global.sendAnonymousUsage=false',
        'service.enabled=true',
        'logs.access.enabled=true',
        'logs.access.format=json',
        'logs.general.level=DEBUG',
        'logs.general.format=json',
        'providers.kubernetesIngress.publishedService.enabled=true',
    ]
)


# helm_remote(
#     'nitter',
#     repo_name='truecharts',
#     repo_url='https://charts.truecharts.org',
#     namespace='futureporn',
#     version='7.1.4',
# )
# helm_remote(
#     'frp-operator',
#     repo_name='frp-operator',
#     repo_url='https://zufardhiyaulhaq.com/frp-operator/charts/releases/',
#     namespace='futureporn',
#     version='1.0.0'
# )
# helm_remote(
#     'kubernetes-ingress-controller',
#     repo_name='kubernetes-ingress-controller',
#     repo_url='https://ngrok.github.io/kubernetes-ingress-controller',
#     namespace='futureporn',
#     create_namespace=False,
#     set=[
#         'credentials.apiKey=%s' % os.getenv('NGROK_API_KEY'),
#         'credentials.authtoken=%s' % os.getenv('NGROK_AUTHTOKEN')
#     ]
# )


# k8s_yaml(helm(
#     './charts/nitter',
#     values=['./charts/nitter/values.yaml'],
# ))


k8s_yaml(helm(
    './charts/fp',
    values=['./charts/fp/values.yaml'],
))
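
## Local overrides can be layered on top of values.yaml without editing it; a sketch
## using helm()'s values/set parameters (the extra file and key here are hypothetical):
# k8s_yaml(helm(
#     './charts/fp',
#     values=['./charts/fp/values.yaml', './charts/fp/values.local.yaml'],
#     set=['next.replicaCount=1'],
# ))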




# docker_build('fp/link2cid', './packages/link2cid')
docker_build(
    'fp/strapi',
    '.',
    dockerfile='./d.strapi.dockerfile',
    target='strapi',
    only=[
        './.npmrc',
        './package.json', 
        './pnpm-lock.yaml', 
        './pnpm-workspace.yaml',
        './packages/strapi',
        './packages/types',
    ],
    live_update=[
        sync('./packages/strapi', '/app'),
        run('cd /app && pnpm i', trigger=['./packages/strapi/package.json', './packages/strapi/pnpm-lock.yaml'])
    ]
)
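## live_update above works in two stages: sync() copies changed files from
## ./packages/strapi into /app in the running container, and run() re-installs
## dependencies only when the trigger files change, so most edits skip a full image rebuild.
## The docker_build() calls below follow the same pattern.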

docker_build(
    'fp/bot',
    '.',
    only=[
        './.npmrc',
        './package.json', 
        './pnpm-lock.yaml',
        './pnpm-workspace.yaml',
        './packages/bot',
        './packages/image',
        './packages/scout',
        './packages/storage',
        './packages/temporal-workflows',
        './packages/types',
        './packages/utils',
    ],
    dockerfile='./d.bot.dockerfile',
    target='dev',
    live_update=[
        sync('./packages/bot', '/app'),
        run('cd /app && pnpm i', trigger=['./packages/bot/package.json', './packages/bot/pnpm-lock.yaml'])
    ]
)






load('ext://uibutton', 'cmd_button')
cmd_button('postgres:create',
    argv=['sh', './scripts/postgres-create.sh'],
    resource='postgres',
    icon_name='dataset',
    text='create (empty) databases',
)
cmd_button('postgres:restore',
    argv=['sh', './scripts/postgres-restore.sh'],
    resource='postgres',
    icon_name='upload',
    text='restore db from backup',
)
cmd_button('postgres:drop',
    argv=['sh', './scripts/postgres-drop.sh'],
    resource='postgres',
    icon_name='delete',
    text='DROP futureporn_db'
)
cmd_button('postgres:drop_temporal',
    argv=['sh', './scripts/postgres-drop-temporal.sh'],
    resource='postgres',
    icon_name='delete',
    text='DROP temporal'
)
cmd_button('postgres:backup',
    argv=['sh', './scripts/postgres-backup.sh'],
    resource='postgres',
    icon_name='download',
    text='backup the database'
)
cmd_button('temporal-web:namespace',
    argv=['sh', './scripts/temporal-namespaces.sh'],
    resource='temporal-web',
    icon_name='badge',
    text='create futureporn namespace',
)
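
## The DROP buttons above run the moment they're clicked; a sketch of a guarded variant,
## assuming this version of the uibutton extension supports the requires_confirmation flag:
# cmd_button('postgres:drop_confirmed',
#     argv=['sh', './scripts/postgres-drop.sh'],
#     resource='postgres',
#     icon_name='delete',
#     text='DROP futureporn_db (asks first)',
#     requires_confirmation=True,
# )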


## fp/next in dev mode
## this is useful for changing the UI and seeing results quickly
docker_build(
    'fp/next',
    '.',
    dockerfile='d.next.dockerfile',
    target='next',
    build_args={
        'NEXT_PUBLIC_STRAPI_URL': 'https://strapi.fp.sbtp.xyz'
    },
    live_update=[
        sync('./packages/next', '/app')
    ]
)


docker_build(
    'fp/scout',
    '.',
    dockerfile='d.scout.dockerfile',
    target='scout',
    live_update=[
        sync('./packages/scout', '/app'),
        run('cd /app && pnpm i', trigger=['./packages/scout/package.json', './packages/scout/pnpm-lock.yaml']),
    ],
    entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts'
    # entrypoint='pnpm tsx watch ./src/index.ts'
)


docker_build(
    'fp/worker',
    '.',
    dockerfile='d.worker.dockerfile',
    target='worker',
    only=[
        './.npmrc',
        './package.json', 
        './pnpm-lock.yaml', 
        './pnpm-workspace.yaml',
        './packages/image',
        './packages/scout',
        './packages/temporal-workflows', 
        './packages/temporal-worker',
        './packages/types',
        './packages/utils',
        './packages/video',
        './packages/storage',
    ],
    live_update=[
        sync('./packages/temporal-worker', '/app'),
        run('cd /app && pnpm i', trigger=['./packages/temporal-worker/package.json', './packages/temporal-worker/pnpm-lock.yaml']),
    ],
    # entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts'
)



docker_build(
    'fp/boop',
    '.',
    dockerfile='d.boop.dockerfile',
    target='boop',
    only=[
        './.npmrc',
        './package.json', 
        './pnpm-lock.yaml', 
        './pnpm-workspace.yaml',
        './packages/boop', 
        './packages/taco',
        './packages/types',
    ],
    live_update=[
        sync('./packages/boop', '/app'),
        # run('cd /app && pnpm i', trigger=['./packages/boop/package.json', './packages/boop/pnpm-lock.yaml']),
    ],
)



# docker_build(
#     'fp/scout-worker',
#     '.',
#     # ignore=['./packages/next'], # I wish I could use this ignore to ignore file changes in this dir, but that's not how it works
#     dockerfile='d.scout.dockerfile',
#     target='scout-worker',
#     live_update=[
#         # idk if this run() is effective
#         # run('cd /app && pnpm i', trigger=['./packages/scout/package.json', './packages/scout/pnpm-lock.yaml']),
#         sync('./packages/scout', '/app'),
#         ## this is a hack to avoid complete scout image rebuilds when src in ./packages/next is updated
#         ## ./packages/next needs to be in the build context because scout depends on types exported from next module (a feature of pnpm workspaces)
#         ## instead of a full rebuild, we put ./packages/next in the live_update spec so the changed files get shoved into /ignore-me
#         ## ideally, I would like to include ./packages/next in the build context, but ignore file changes there for rebuilds.
#         ## however, I don't think tilt has this capability.
#         sync('./packages/next', '/ignore-me'),
#     ],
#     # this entrypoint is a godsend. It lets me restart the node app (fast) without having to rebuild the docker container (slow)
#     entrypoint='pnpm nodemon --ext js,ts,json,yaml --exec node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/temporal/worker.ts'
# )




# k8s_resource(
#     workload='kubernetes-ingress-controller-manager',
#     links=[
#       link(os.getenv('NGROK_URL'), 'Endpoint')
#     ],
#     labels='ngrok'
# )
# k8s_resource(
#     workload='frp-operator-controller-manager',
#     labels='tunnel'
# )
# k8s_resource(
#     workload='echo',
#     links=[
#         link('https://echo.fp.sbtp.xyz'),
#         link('http://echo.futureporn.svc.cluster.local:8001')
#     ],
#     labels='debug'
# )


k8s_resource(
    workload='uppy',
    links=[
        link('https://uppy.fp.sbtp.xyz'),
    ],
    resource_deps=['redis-master'],
    labels=['backend'],
)
k8s_resource(
    workload='next',
    port_forwards=['3000'],
    links=[
        link('https://next.fp.sbtp.xyz'),
    ],
    resource_deps=['strapi', 'postgres'],
    labels=['frontend'],
)
k8s_resource(
    workload='strapi',
    port_forwards=['1339'],
    links=[
        link('https://strapi.fp.sbtp.xyz/admin'),
        link('https://strapi.fp.sbtp.xyz'),
    ],
    resource_deps=['postgres'],
    labels=['backend'],
)

k8s_resource(
    workload='postgres',
    port_forwards=['5432'],
    labels=['backend'],
)

k8s_resource(
    workload='traefik',
    port_forwards=['9000:9000'],
    links=[
        link('http://localhost:9000/dashboard/')  # trailing slash is required by the traefik dashboard
    ],
    labels=['networking'],
)



k8s_resource(
    workload='scout',
    resource_deps=['postgres', 'strapi', 'temporal-frontend', 'worker'],
    labels=['backend'],
)


# k8s_resource(
#     workload='pgadmin',
#     port_forwards=['5050'],
#     resource_deps=['postgres']
# )

# k8s_resource(
#     workload='nitter',
#     port_forwards=['6060:10606'],
# )

# temporarily disabled to save CPU resources
# helm_remote(
#     'kube-prometheus-stack',
#     repo_name='kube-prometheus-stack',
#     repo_url='https://prometheus-community.github.io/helm-charts',
#     namespace='futureporn',
#     version='61.1.1',
#     set=[
#         'prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName=vultr-block-storage',
#         'grafana.admin.existingSecret=grafana',
#         'grafana.sidecar.dashboards.enabled=true',
#         'grafana.sidecar.dashboards.defaultFolderName=balls',
#         'grafana.sidecar.dashboards.label=grafana_dashboard',
#         'grafana.sidecar.dashboards.provider.foldersFromFileStructure=true'
#     ]
# )

helm_remote(
    'redis',
    repo_name='redis',
    repo_url='https://charts.bitnami.com/bitnami',
    namespace='futureporn',
    version='19.6.1',
    set=[
        'auth.existingSecret=redis',
        'auth.existingSecretPasswordKey=password',
        'master.persistence.enabled=false',
        'replica.persistence.enabled=false',
        'architecture=standalone'
    ]
)

helm_remote(
    'temporal',
    repo_name='temporal',
    repo_url='https://charts.lemontech.engineering',
    namespace='futureporn',
    version='0.37.0',
    set=[
        'admintools.image.tag=1.24.2-tctl-1.18.1-cli-0.13.0',
        'web.image.tag=2.28.0',
        'prometheus.enabled=false',
        'grafana.enabled=false',
        'elasticsearch.enabled=false',
        'web.config.auth.enabled=true',
        'cassandra.enabled=false',
        'server.config.persistence.default.driver=sql',
        'server.config.persistence.default.sql.driver=postgres12',
        'server.config.persistence.default.sql.existingSecret=postgres',
        'server.config.persistence.default.sql.secretName=postgres',
        'server.config.persistence.default.sql.secretKey=password',
        'server.config.persistence.default.sql.host=postgresql-primary.futureporn.svc.cluster.local',
        'server.config.persistence.default.sql.port=5432',
        'server.config.persistence.default.sql.user=postgres',
        'server.config.persistence.visibility.driver=sql',
        'server.config.persistence.visibility.sql.driver=postgres12',
        'server.config.persistence.visibility.sql.host=postgresql-primary.futureporn.svc.cluster.local',
        'server.config.persistence.visibility.sql.port=5432',
        'server.config.persistence.visibility.sql.user=postgres',
        'server.config.persistence.visibility.sql.existingSecret=postgres',
        'server.config.persistence.visibility.sql.secretName=postgres',
        'server.config.persistence.visibility.sql.secretKey=password',
    ]
)
## every temporal workload gets the same label and deps; temporal-web also gets a port-forward
for workload in [
    'temporal-admintools',
    'temporal-frontend',
    'temporal-history',
    'temporal-worker',
    'temporal-web',
    'temporal-schema-setup',
    'temporal-schema-update',
    'temporal-matching',
]:
    k8s_resource(
        workload=workload,
        labels=['temporal'],
        port_forwards=['8080'] if workload == 'temporal-web' else [],
        resource_deps=[
            'postgres',
            'strapi'
        ],
    )

k8s_resource(
    workload='external-dns',
    labels=['networking'],
)

k8s_resource(
    workload='cert-manager-webhook-exoscale',
    labels=['networking'],
)


k8s_resource(
    workload='redis-master',
    labels=['backend']
)

k8s_resource(
    workload='bot',
    labels=['backend'],
    # resource_deps=['strapi', 'temporal-web'],
)

k8s_resource(
    workload='worker',
    labels=['backend'],
    resource_deps=['strapi', 'temporal-web', 'postgres' ],
)

# k8s_resource(
#     workload='trigger',
#     labels=['backend'],
#     port_forwards=['3030:3000'],
#     resource_deps=['postgres', 'redis-master'],
#     links=[
#         link('http://localhost:3030')
#     ],
# )