remove .spec.version which is out of place

commit 4900a22ba0
parent c7ae9244b9
Author: CJ_Clippy
Date:   2024-06-19 08:46:23 -08:00
17 changed files with 1061 additions and 1231 deletions


@@ -10,7 +10,7 @@ secrets:
 flux:
 	./scripts/flux-bootstrap.sh
-dev: kind namespaces secrets chisel velero tilt
+dev: kind namespaces secrets chisel velero
 prod: namespaces secrets velero flux
@@ -18,7 +18,8 @@ velero:
 	./scripts/velero-create.sh
 tilt:
-	tilt up -f ./t.wip.tiltfile
+	kind get kubeconfig > ~/.kube/kind.yaml
+	KUBECONFIG=~/.kube/kind.yaml tilt up -f ./t.wip.tiltfile
 define _script


@@ -1,7 +1,7 @@
 apiVersion: chisel-operator.io/v1
 kind: ExitNode
 metadata:
-  name: uwu-exit-node
+  name: chisel-exit-node
   namespace: futureporn
 spec:
   # IP address of exit node
@@ -12,4 +12,4 @@ spec:
   # Name of the secret containing the auth key
   # This is not required, but recommended
   # If not set, the operator will automatically generate a secret for you
-  auth: uwu-auth
+  auth: chisel
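
For reference, "auth: chisel" names a Kubernetes Secret holding the chisel credentials. A minimal sketch of that Secret, assuming the username:password format used by the secrets script later in this commit (the credential values here are placeholders):

apiVersion: v1
kind: Secret
metadata:
  name: chisel
  namespace: futureporn
type: Opaque
stringData:
  # chisel-operator reads the "auth" key; the format is "username:password"
  auth: chiseluser:changeme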


@@ -1,13 +1,12 @@
 apiVersion: helm.toolkit.fluxcd.io/v2beta2
 kind: HelmRelease
 metadata:
-  name: temporal
   namespace: futureporn
+  name: temporal
 spec:
   releaseName: temporal
-  version: '0.37.0'
   chart:
-    version: '0.38.0'
+    version: '0.37.0'
     spec:
       version: '0.39.0'
       chart: temporal
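
For context on the commit title: in a Flux HelmRelease, the chart version belongs at .spec.chart.spec.version, not directly under .spec. A minimal sketch of the conventional layout (the sourceRef is an assumption for illustration, not taken from this diff):

apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: temporal
  namespace: futureporn
spec:
  releaseName: temporal
  chart:
    spec:
      chart: temporal
      version: '0.39.0'  # the chart version lives here
      sourceRef:         # assumed HelmRepository; not shown in this diff
        kind: HelmRepository
        name: temporal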


@@ -8,7 +8,7 @@ metadata:
 spec:
   host: "{{ .Values.chisel.exitNodeIp }}"
   port: 9090
-  auth: uwu-auth
+  auth: chisel
 {{ end }}
@@ -19,7 +19,7 @@ metadata:
   name: next
   namespace: futureporn
 {{ if eq .Values.managedBy "tilt" }}
-  # create a tunnel to uwu-exit-node (chisel server)
+  # create a tunnel to chisel-exit-node (chisel server)
   # this allows us to have SSL in development
   annotations:
     chisel-operator.io/exit-node-name: "next-exit-node"
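
For context, chisel-operator provisions a tunnel for a Service that carries this annotation. A minimal sketch of the pattern (the Service type, selector, and port are assumptions for illustration, not the repo's actual manifest):

apiVersion: v1
kind: Service
metadata:
  name: next
  namespace: futureporn
  annotations:
    # routes this Service's tunnel through the named ExitNode
    chisel-operator.io/exit-node-name: "next-exit-node"
spec:
  type: LoadBalancer  # chisel-operator acts on LoadBalancer Services
  selector:
    app: next
  ports:
    - port: 3000
      targetPort: 3000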


@@ -8,7 +8,7 @@ metadata:
 spec:
   host: "{{ .Values.chisel.exitNodeIp }}"
   port: 9090
-  auth: uwu-auth
+  auth: chisel
 {{ end }}
 ---
@@ -18,7 +18,7 @@ metadata:
   name: strapi
   namespace: futureporn
 {{ if eq .Values.managedBy "tilt" }}
-  # create a tunnel to uwu-exit-node (chisel server)
+  # create a tunnel to chisel-exit-node (chisel server)
   # this allows us to have SSL in development
   annotations:
     chisel-operator.io/exit-node-name: "strapi-exit-node"
@@ -168,6 +168,8 @@ spec:
   # storageClassName: {{ .Values.storageClassName }}
+{{ if eq .Values.managedBy "Helm" }}
+---
 apiVersion: networking.k8s.io/v1
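
The two added lines open a guard so the Ingress only renders when the chart is managed by Helm, mirroring the "tilt" guard above; the matching {{ end }} is presumably in the truncated remainder of the hunk. The general shape of the pattern, with a placeholder Ingress body:

{{ if eq .Values.managedBy "Helm" }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: strapi  # placeholder
  namespace: futureporn
spec:
  rules: []  # real rules omitted
{{ end }}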


@@ -1,219 +0,0 @@
-version: '3.4'
-services:
-  chisel:
-    container_name: fp-chisel
-    image: jpillora/chisel
-    ports:
-      - "9312:9312"
-    restart: on-failure
-    command: "client --auth=${CHISEL_AUTH} ${CHISEL_SERVER} R:8899:cluster0:9094 R:8901:link2cid:3939 R:8900:strapi:1337 R:8902:next:3000 R:8903:uppy:3020 R:8904:uppy:8888"
-  link2cid:
-    container_name: fp-link2cid
-    restart: on-failure
-    build:
-      context: ./packages/link2cid
-      dockerfile: Dockerfile
-      target: dev
-    ports:
-      - "3939:3939"
-    environment:
-      API_KEY: ${LINK2CID_API_KEY}
-      IPFS_URL: "http://ipfs0:5001"
-      PORT: 3939
-    volumes:
-      - ./packages/link2cid/index.js:/app/index.js
-  ipfs0:
-    container_name: fp-ipfs0
-    restart: on-failure
-    image: ipfs/kubo:release
-    ports:
-      - "5001:5001"
-    volumes:
-      - ./compose/ipfs0:/data/ipfs
-  cluster0:
-    container_name: fp-cluster0
-    image: ipfs/ipfs-cluster:latest
-    restart: on-failure
-    depends_on:
-      - ipfs0
-    environment:
-      CLUSTER_PEERNAME: cluster0
-      CLUSTER_SECRET: ${CLUSTER_SECRET} # From shell variable if set
-      CLUSTER_IPFSHTTP_NODEMULTIADDRESS: /dns4/ipfs0/tcp/5001
-      CLUSTER_CRDT_TRUSTEDPEERS: '*' # Trust all peers in Cluster
-      CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS: /ip4/0.0.0.0/tcp/9094 # Expose API
-      CLUSTER_RESTAPI_BASICAUTHCREDENTIALS: ${CLUSTER_RESTAPI_BASICAUTHCREDENTIALS}
-      CLUSTER_MONITORPINGINTERVAL: 2s # Speed up peer discovery
-    ports:
-      - "127.0.0.1:9094:9094"
-    volumes:
-      - ./compose/cluster0:/data/ipfs-cluster
-  strapi:
-    container_name: fp-strapi
-    image: fp-strapi:14
-    build:
-      context: ./packages/strapi
-      dockerfile: Dockerfile
-    restart: on-failure
-    depends_on:
-      - db
-    # env_file: ./packages/strapi/.env
-    environment:
-      # ADMIN_PASSWORD: ${STRAPI_ADMIN_PASSWORD}
-      # ADMIN_EMAIL: ${STRAPI_ADMIN_EMAIL}
-      BASE_URL: ${STRAPI_BASE_URL}
-      SMTP_HOST: 172.17.0.1
-      SMTP_PORT: 25
-      SMTP_AUTH_STRATEGY: NONE
-      SMTP_FROM_EMAIL: sender@example.com
-      SENDGRID_API_KEY: ${SENDGRID_API_KEY}
-      DATABASE_CLIENT: postgres
-      DATABASE_HOST: db
-      DATABASE_PORT: ${POSTGRES_PORT}
-      DATABASE_NAME: ${POSTGRES_DB}
-      DATABASE_USERNAME: ${POSTGRES_USER}
-      DATABASE_PASSWORD: ${POSTGRES_PASSWORD}
-      JWT_SECRET: ${STRAPI_JWT_SECRET}
-      ADMIN_JWT_SECRET: ${STRAPI_ADMIN_JWT_SECRET}
-      APP_KEYS: ${STRAPI_APP_KEYS}
-      NODE_ENV: ${NODE_ENV}
-      API_TOKEN_SALT: ${STRAPI_API_TOKEN_SALT}
-      TRANSFER_TOKEN_SALT: ${STRAPI_TRANSFER_TOKEN_SALT}
-      MUX_SIGNING_KEY_PRIVATE_KEY: ${MUX_SIGNING_KEY_PRIVATE_KEY}
-      MUX_SIGNING_KEY_ID: ${MUX_SIGNING_KEY_ID}
-      MUX_PLAYBACK_RESTRICTION_ID: ${MUX_PLAYBACK_RESTRICTION_ID}
-      STRAPI_URL: ${STRAPI_URL}
-      CDN_BUCKET_URL: ${CDN_BUCKET_URL}
-      CDN_BUCKET_USC_URL: ${CDN_BUCKET_USC_URL}
-      S3_USC_BUCKET_KEY_ID: ${S3_USC_BUCKET_KEY_ID}
-      S3_USC_BUCKET_APPLICATION_KEY: ${S3_USC_BUCKET_APPLICATION_KEY}
-      S3_USC_BUCKET_NAME: ${S3_USC_BUCKET_NAME}
-      S3_USC_BUCKET_ENDPOINT: ${S3_USC_BUCKET_ENDPOINT}
-      S3_USC_BUCKET_REGION: ${S3_USC_BUCKET_REGION}
-      AWS_ACCESS_KEY_ID: ${S3_USC_BUCKET_KEY_ID}
-      AWS_SECRET_ACCESS_KEY: ${S3_USC_BUCKET_APPLICATION_KEY}
-    ports:
-      - "1337:1337"
-    volumes:
-      - ./packages/strapi/config:/opt/app/config
-      - ./packages/strapi/src:/opt/app/src
-      - ./packages/strapi/database:/opt/app/database
-      - ./packages/strapi/public/uploads:/opt/app/public/uploads
-      - ./packages/strapi/package.json:/opt/app/package.json
-      - ./packages/strapi/yarn.lock:/opt/app/yarn.lock
-      # - ./packages/strapi/.env:/opt/app/.env
-      # - ./packages/strapi/entrypoint.sh:/opt/app/entrypoint.sh
-  next:
-    container_name: fp-next
-    build:
-      context: .
-      dockerfile: next.Dockerfile
-      target: dev
-    restart: on-failure
-    environment:
-      REVALIDATION_TOKEN: ${NEXT_REVALIDATION_TOKEN}
-      NODE_ENV: development
-      NEXT_PUBLIC_STRAPI_URL: ${NEXT_PUBLIC_STRAPI_URL}
-      NEXT_PUBLIC_UPPY_COMPANION_URL: ${NEXT_PUBLIC_UPPY_COMPANION_URL}
-      NEXT_PUBLIC_SITE_URL: ${NEXT_PUBLIC_SITE_URL}
-    ports:
-      - "3000:3000"
-    volumes:
-      # - /app/node_modules
-      # - /app/.next
-      # - /app/.pnpm-store
-      - ./packages/next/app:/app/app
-  bot:
-    container_name: fp-bot
-    build:
-      context: .
-      dockerfile: ./packages/bot/Dockerfile
-      target: dev
-    restart: on-failure
-    environment:
-      REST_HOST: localhost
-      REST_PORT: 8888
-      DISCORD_TOKEN: ${DISCORD_TOKEN}
-      DISCORD_GUILD_ID: ${DISCORD_GUILD_ID}
-    ports:
-      - "8888:8888"
-    volumes:
-      - ./packages/bot/package.json:/app/package.json
-      - ./packages/bot/src:/app/src
-  db:
-    container_name: fp-db
-    image: postgres:16
-    restart: on-failure
-    environment:
-      POSTGRES_DB: ${POSTGRES_DB}
-      POSTGRES_USER: ${POSTGRES_USER}
-      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
-      PGDATA: /var/lib/postgresql/data
-      PGPORT: ${POSTGRES_PORT}
-    volumes:
-      - ./compose/db/pgdata:/var/lib/postgresql/data
-    ports:
-      - "15432:15432"
-  pgadmin:
-    container_name: fp-pgadmin
-    image: dpage/pgadmin4:8
-    restart: on-failure
-    environment:
-      PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL}
-      PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD}
-      PGADMIN_DISABLE_POSTFIX: yessir
-      GUNICORN_ACCESS_LOGFILE: /tmp/pgadmin-gunicorn-access.log # this makes console output less noisy
-    ports:
-      - "5050:80"
-  uppy:
-    container_name: fp-uppy
-    build:
-      context: .
-      dockerfile: ./packages/uppy/Dockerfile
-      target: run
-    restart: on-failure
-    environment:
-      SESSION_SECRET: ${UPPY_SESSION_SECRET}
-      PORT: ${UPPY_PORT}
-      FILEPATH: ${UPPY_FILEPATH}
-      NEXT_PUBLIC_SITE_URL: ${NEXT_PUBLIC_SITE_URL}
-      HOST: ${UPPY_HOST}
-      UPLOAD_URLS: ${UPPY_UPLOAD_URLS}
-      SECRET: ${UPPY_SECRET}
-      SERVER_BASE_URL: ${UPPY_SERVER_BASE_URL}
-      B2_ENDPOINT: ${UPPY_B2_ENDPOINT}
-      B2_BUCKET: ${UPPY_B2_BUCKET}
-      B2_SECRET: ${UPPY_B2_SECRET}
-      B2_KEY: ${UPPY_B2_KEY}
-      B2_REGION: ${UPPY_B2_REGION}
-      DRIVE_KEY: ${UPPY_DRIVE_KEY}
-      DRIVE_SECRET: ${UPPY_DRIVE_SECRET}
-      DROPBOX_KEY: ${UPPY_DROPBOX_KEY}
-      DROPBOX_SECRET: ${UPPY_DROPBOX_SECRET}
-      JWT_SECRET: ${STRAPI_JWT_SECRET} # we use strapi's JWT secret so we can verify that uploads are from account holders
-      STRAPI_API_KEY: ${UPPY_STRAPI_API_KEY}
-      STRAPI_URL: ${UPPY_STRAPI_URL}
-    ports:
-      - "3020:3020"
-    volumes:
-      - ./packages/uppy/index.js:/app/index.js


@@ -34,6 +34,7 @@ if (!process.env.CDN_BUCKET_URL) throw new Error('CDN_BUCKET_URL is undefined in
  * It's a 3 step process, with each step outlined in the function body.
  */
 export async function createStreamInDb ({ source, platform, channel, date, url, userId }) {
+  throw new Error('createStreamInDb is deprecated.');
   let vtuberId, streamId
@@ -80,7 +81,7 @@ export async function createStreamInDb ({ source, platform, channel, date, url,
   }
   if (!vtuberId) {
-    console.log('>> vtuberId was not found so we create')
+    console.log('>> !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! (DEPRECATED FUNCTION) vtuberId was not found so we create')
     /**
      * We are creating a vtuber record.


@@ -113,7 +113,7 @@ export async function upsertVtuber({ platform, userId, url, channel }: Notificat
   // get b2 cdn link to image
   const imageCdnLink = `${process.env.CDN_BUCKET_URL}/${b2FileData.Key}`
+  console.log(`>>> createVtuberRes here we go 3-2-1, POST!`)
   const createVtuberRes = await fetch(`${process.env.STRAPI_URL}/api/vtubers`, {
     method: 'POST',
     headers: {
@@ -141,6 +141,7 @@ export async function upsertVtuber({ platform, userId, url, channel }: Notificat
       console.log(`>>> vtuber created with id=${vtuberId}`)
     }
   }
+  if (!vtuberId) throw new Error(`upsertVtuber failed to produce a vtuberId! This should not happen under normal circumstances.`);
   return vtuberId
 }
 export async function upsertPlatformNotification({ source, date, platform, vtuberId }: { source: string, date: string, platform: string, vtuberId: number }): Promise<number> {


@@ -36,6 +36,7 @@ export async function processEmail({
   // Step 1
   const vtuberId = await upsertVtuber({ url, platform, channel, displayName, date, userId, avatar })
+  console.log(` 🤠 upsertVtuber has completed, and the vtuberId=${vtuberId}`)
   const pNotifId = await upsertPlatformNotification({ vtuberId, source: 'email', date, platform })
   const streamId = await upsertStream({ date, vtuberId, platform, pNotifId })

(File diff suppressed because it is too large.)


@@ -28,6 +28,10 @@ kubectl --namespace futureporn delete secret frp --ignore-not-found
 kubectl --namespace futureporn create secret generic frp \
   --from-literal=token=${FRP_TOKEN}
+kubectl --namespace futureporn delete secret chisel --ignore-not-found
+kubectl --namespace futureporn create secret generic chisel \
+  --from-literal=auth=${CHISEL_USERNAME}:${CHISEL_PASSWORD}
+
 kubectl --namespace futureporn delete secret scout --ignore-not-found
 kubectl --namespace futureporn create secret generic scout \
   --from-literal=recentsToken=${SCOUT_RECENTS_TOKEN} \


@@ -26,6 +26,6 @@ kubectl -n futureporn exec postgres -- psql -U postgres --command "\
 # kubectl exec -i POD_NAME -- pg_restore -U USERNAME -C -d DATABASE < dump.sql
-kubectl -n futureporn cp /home/cj/Documents/futureporn-meta/backups/20240602T185141Z_development.psql postgres:/tmp/db.psql
+kubectl -n futureporn cp /home/cj/Documents/futureporn-meta/backups/2024-06-18_20-35-38-futureporn-db.psql postgres:/tmp/db.psql
 kubectl -n futureporn exec -i postgres -- pg_restore -U postgres -d futureporn_db /tmp/db.psql
 # kubectl -n futureporn exec -ti db-postgresql-0 -- rm /tmp/db.psql


@@ -5,38 +5,11 @@
 # deploy_cert_manager()
 load('ext://dotenv', 'dotenv')
 dotenv(fn='.env')
+default_registry('localhost:5001')
-# kubefwd all namespaces Tilt deploys to.
-# v1alpha1.extension_repo(name='default', url='https://github.com/tilt-dev/tilt-extensions')
-# v1alpha1.extension(name='kubefwd:config', repo_name='default', repo_path='kubefwd')
-# v1alpha1.extension_repo(
-#   name='default',
-#   url='https://github.com/tilt-dev/tilt-extensions'
-# )
-# v1alpha1.extension(
-#   name='ngrok:config',
-#   repo_name='default',
-#   repo_path='ngrok',
-# )
-# args=['--default_config_file=%s' % os.getenv('TILT_NGROK_DEFAULT_CONFIG_FILE')]
 load('ext://helm_remote', 'helm_remote')
-# helm_remote(
-#   'redis',
-#   repo_name='redis',
-#   repo_url='https://charts.bitnami.com/bitnami',
-#   namespace='futureporn',
-#   version='19.5.2',
-#   set=[
-#     'auth.password=%s' % os.getenv('TRIGGER_REDIS_PASSWORD'),
-#     'architecture=standalone',
-#   ],
-# )
-# allow_k8s_contexts('vke-e41885d3-7f93-4f01-bfaa-426f20bf9f3f')
-# helm_remote(
-#   'velero',
@@ -356,12 +329,59 @@ k8s_resource(
 # )
-k8s_resource(workload='temporal-admintools', labels='temporal', resource_deps=['postgres'])
-k8s_resource(workload='temporal-frontend', labels='temporal', port_forwards=['7233'], resource_deps=['postgres'])
-k8s_resource(workload='temporal-history', labels='temporal', resource_deps=['postgres'])
-k8s_resource(workload='temporal-worker', labels='temporal', resource_deps=['postgres'])
-k8s_resource(workload='temporal-web', labels='temporal', port_forwards=['8080'], resource_deps=['postgres'])
-k8s_resource(workload='temporal-schema-setup', labels='temporal', resource_deps=['postgres'])
-k8s_resource(workload='temporal-schema-update', labels='temporal', resource_deps=['postgres'])
-k8s_resource(workload='temporal-matching', labels='temporal', resource_deps=['postgres'])
+k8s_resource(
+  workload='temporal-admintools',
+  labels='temporal',
+  resource_deps=[
+    'postgres',
+    'strapi'
+  ])
+k8s_resource(
+  workload='temporal-frontend',
+  labels='temporal', port_forwards=['7233'],
+  resource_deps=[
+    'postgres',
+    'strapi'
+  ])
+k8s_resource(
+  workload='temporal-history',
+  labels='temporal',
+  resource_deps=[
+    'postgres',
+    'strapi'
+  ])
+k8s_resource(
+  workload='temporal-worker',
+  labels='temporal',
+  resource_deps=[
+    'postgres',
+    'strapi'
+  ])
+k8s_resource(
+  workload='temporal-web',
+  labels='temporal', port_forwards=['8080'],
+  resource_deps=[
+    'postgres',
+    'strapi'
+  ])
+k8s_resource(
+  workload='temporal-schema-setup',
+  labels='temporal',
+  resource_deps=[
+    'postgres',
+    'strapi'
+  ])
+k8s_resource(
+  workload='temporal-schema-update',
+  labels='temporal',
+  resource_deps=[
+    'postgres',
+    'strapi'
+  ])
+k8s_resource(
+  workload='temporal-matching',
+  labels='temporal',
+  resource_deps=[
+    'postgres',
+    'strapi'
+  ])