separate staging from prod

CJ_Clippy 2024-06-19 17:43:25 -08:00
parent ad1d81b75a
commit 92a77d0876
21 changed files with 197 additions and 65 deletions

View File

@@ -7,12 +7,17 @@ namespaces:
 secrets:
   ./scripts/k8s-secrets.sh
-flux:
-  ./scripts/flux-bootstrap.sh
+flux-prod:
+  ./scripts/flux-bootstrap-prod.sh
+flux-staging:
+  ./scripts/flux-bootstrap-staging.sh
 dev: kind namespaces secrets chisel velero
-prod: namespaces secrets velero flux
+prod: namespaces secrets velero flux-prod
+staging: namespaces secrets velero flux-staging
 velero:
   ./scripts/velero-create.sh
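
With staging and prod now wired to separate Flux bootstrap scripts, each environment gets its own aggregate target. Assuming these are just/make-style recipes invoked by target name (the runner itself isn't shown in this hunk), bringing up an environment would look roughly like:

  # hypothetical invocations; target names come from the hunk above
  just staging    # runs namespaces secrets velero flux-staging
  just prod       # runs namespaces secrets velero flux-prod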

View File

@@ -42,9 +42,6 @@ spec:
       url: https://strapi.piko.sbtp.xyz
       certIssuer: letsencrypt-staging
       hostname: strapi.futureporn.svc.cluster.local
-      ingressClassName: ngrok
-      ngrok:
-        hostname: grateful-engaging-cicada.ngrok-free.app
     realtime:
       imageName: gitea.futureporn.net/futureporn/realtime:latest
       adminEmail: cj@futureporn.net

View File

@@ -0,0 +1,17 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: fp
  namespace: futureporn
spec:
  values:
    next:
      certIssuer: letsencrypt-prod
      hostname: next.futureporn.net
    scout:
      certIssuer: letsencrypt-prod
      cdnBucketUrl: https://futureporn-b2.b-cdn.net
      s3BucketName: futureporn-b2
    strapi:
      url: https://portal.futureporn.net
      hostname: portal.futureporn.net
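
This production overlay patches values only; the chart itself is presumably defined by the base fp HelmRelease. A quick way to confirm the patch landed after Flux reconciles, assuming a kubeconfig pointed at the prod cluster:

  # standard flux/kubectl commands; resource names match the manifest above
  flux get helmreleases -n futureporn
  kubectl -n futureporn get helmrelease fp -o yaml | grep -E 'hostname|certIssuer'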

View File

@@ -1,11 +1,11 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
+namespace: futureporn
 resources:
-  - ../base/podinfo
   - ../base/temporal
   - ../base/fp
 patches:
-  - path: podinfo-values.yaml
+  - path: fp-values.yaml
     target:
       kind: HelmRelease
-      name: podinfo
+      name: fp

View File

@@ -1,21 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: podinfo
  namespace: futureporn
spec:
  chart:
    spec:
      version: ">=1.0.0"
  values:
    ingress:
      hosts:
        - host: podinfo.sbtp.xyz
          paths:
            - path: /
              pathType: Prefix
              backend:
                service:
                  name: podinfo
                  port:
                    number: 9898

View File

@@ -1,25 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: GitRepository
metadata:
  name: chisel-operator
  namespace: futureporn
spec:
  interval: 5m
  url: https://github.com/FyraLabs/chisel-operator
  ref:
    branch: master
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: chisel-operator
  namespace: futureporn
spec:
  interval: 10m
  targetNamespace: futureporn
  sourceRef:
    kind: GitRepository
    name: chisel-operator
  path: "./kustomize"
  prune: true

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: futureporn
resources:
  - chisel.yaml

View File

@@ -0,0 +1,17 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: fp
  namespace: futureporn
spec:
  values:
    next:
      certIssuer: letsencrypt-staging
      hostname: next.sbtp.xyz
    scout:
      certIssuer: letsencrypt-staging
      cdnBucketUrl: https://fp-dev.b-cdn.net
      s3BucketName: fp-dev
    strapi:
      url: https://strapi.sbtp.xyz
      hostname: strapi.sbtp.xyz

View File

@@ -3,7 +3,14 @@ kind: Kustomization
 namespace: futureporn
 resources:
   - ../base/podinfo
+  - ../base/temporal
+  - ../base/fp
 patches:
   - path: podinfo-values.yaml
     target:
       kind: HelmRelease
       name: podinfo
+  - path: fp-values.yaml
+    target:
+      kind: HelmRelease
+      name: fp

View File

@@ -12,7 +12,7 @@ spec:
   values:
     ingress:
       hosts:
-        - host: podinfo.staging
+        - host: podinfo.sbtp.xyz
           paths:
             - path: /
               pathType: ImplementationSpecific

View File

@@ -4,7 +4,7 @@ metadata:
   name: apps
   namespace: flux-system
 spec:
-  interval: 10m0s
+  interval: 1m0s
   dependsOn:
     - name: infra-configs
   sourceRef:

View File

@@ -14,6 +14,7 @@ spec:
   path: ./infrastructure/controllers
   prune: true
   wait: true
 ---
 apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization

View File

@@ -6,7 +6,7 @@ metadata:
 spec:
   acme:
     # Replace the email address with your own contact email
-    email: fluxcdbot@users.noreply.github.com
+    email: cj@futureporn.net
     # The server is replaced in /clusters/production/infrastructure.yaml
     server: https://acme-staging-v02.api.letsencrypt.org/directory
     privateKeySecretRef:

View File

@@ -3,6 +3,7 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: cert-manager
 ---
 apiVersion: source.toolkit.fluxcd.io/v1
 kind: HelmRepository
@@ -12,6 +13,7 @@ metadata:
 spec:
   interval: 24h
   url: https://charts.jetstack.io
 ---
 apiVersion: helm.toolkit.fluxcd.io/v2
 kind: HelmRelease

View File

@ -0,0 +1,12 @@
{
"name": "scripts",
"type": "module",
"version": "1.0.0",
"description": "",
"keywords": [],
"author": "",
"license": "Unlicense",
"dependencies": {
"dotenv": "^16.4.5"
}
}

View File

@@ -0,0 +1,23 @@
lockfileVersion: '9.0'

settings:
  autoInstallPeers: true
  excludeLinksFromLockfile: false

importers:

  .:
    dependencies:
      dotenv:
        specifier: ^16.4.5
        version: 16.4.5

packages:

  dotenv@16.4.5:
    resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
    engines: {node: '>=12'}

snapshots:

  dotenv@16.4.5: {}

View File

@@ -0,0 +1,84 @@
#!/usr/bin/env node

import dotenv from 'dotenv'
dotenv.config({ path: '../../.env' })

const apiV2Base = 'https://api.vultr.com/v2'

if (!process.env.VULTR_API_KEY) throw new Error('VULTR_API_KEY is missing in env');

async function deleteOrphanedLoadBalancers() {
  console.log('getting load balancers')
  const loadBalancersRes = await fetch(`${apiV2Base}/load-balancers`, {
    method: 'GET',
    headers: {
      'authorization': `Bearer ${process.env.VULTR_API_KEY}`
    }
  })
  const loadBalancerJson = await loadBalancersRes.json()
  const orphanedLoadBalancers = loadBalancerJson.load_balancers.filter((lb) => (lb.instances.length === 0))
  console.log(`found ${orphanedLoadBalancers.length} orphaned load balancers.`)
  console.log('waiting 1 second')
  await new Promise((resolve) => { setTimeout(resolve, 1000) })
  for (const lb of orphanedLoadBalancers) {
    console.log(`deleting load balancer ${lb.id}`)
    const deleteLoadBalancerRes = await fetch(`https://api.vultr.com/v2/load-balancers/${lb.id}`, {
      method: 'DELETE',
      headers: {
        'Authorization': `Bearer ${process.env.VULTR_API_KEY}`
      }
    })
    console.log('waiting 1 second')
    await new Promise((resolve) => { setTimeout(resolve, 1000) })
  }
}

async function deleteOrphanedBlockStorage() {
  console.log('getting block storage')
  const blocksRes = await fetch(`${apiV2Base}/blocks`, {
    method: 'GET',
    headers: {
      'authorization': `Bearer ${process.env.VULTR_API_KEY}`
    }
  })
  const blocksJson = await blocksRes.json()
  const orphanedBlocks = blocksJson.blocks.filter((b) => b.attached_to_instance === '')
  console.log(`found ${orphanedBlocks.length} orphaned block storages`)
  console.log('waiting 1 second')
  await new Promise((resolve) => { setTimeout(resolve, 1000) })
  for (const block of orphanedBlocks) {
    console.log(`deleting block ${block.id}`)
    const deleteBlocksRes = await fetch(`${apiV2Base}/blocks/${block.id}`, {
      method: 'DELETE',
      headers: {
        'Authorization': `Bearer ${process.env.VULTR_API_KEY}`
      }
    })
    console.log('waiting 1 second')
    await new Promise((resolve) => { setTimeout(resolve, 1000) })
  }
}

async function main() {
  await deleteOrphanedLoadBalancers()
  await deleteOrphanedBlockStorage()
}

main()
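
The script reads VULTR_API_KEY from a .env file resolved as ../../.env relative to the working directory, and it pairs with the package.json and pnpm lockfile added above. The file's actual name and path aren't shown in this view, so the invocation below uses a placeholder:

  # placeholder filename; substitute whatever this script is saved as in the scripts package
  cd scripts
  pnpm install
  node ./vultr-delete-orphans.js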

View File

View File

@ -0,0 +1,17 @@
#!/bin/bash
## this way is annoying because deployment asks for git password
# flux bootstrap git \
# --kubeconfig /home/cj/.kube/vke.yaml \
# --url=https://gitea.futureporn.net/futureporn/fp.git \
# --branch=main \
# --username=cj_clippy \
# --token-auth=true \
# --path=clusters/staging
## this way is more automatic although it does ask for yes/no confirmation that the ssh key has repo access
flux bootstrap git \
--url="ssh://git@gitea.futureporn.net:2222/futureporn/fp" \
--branch=main \
--path="clusters/staging" \
--private-key-file=/home/cj/.ssh/fp-flux
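
The task file at the top of this commit also invokes ./scripts/flux-bootstrap-prod.sh, which isn't expanded in this view. A minimal sketch, assuming it mirrors the staging script with the path pointed at the production cluster directory:

  #!/bin/bash
  # hypothetical prod counterpart; assumes only --path differs from the staging script
  flux bootstrap git \
    --url="ssh://git@gitea.futureporn.net:2222/futureporn/fp" \
    --branch=main \
    --path="clusters/production" \
    --private-key-file=/home/cj/.ssh/fp-flux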

scripts/postgres-restore.sh Normal file → Executable file
View File

@@ -1,5 +1,6 @@
 #!/bin/bash
+dbname=2024-06-19_22-24-03-futureporn-db.psql
 ## drop futureporn_db
 kubectl -n futureporn exec postgres -- psql -U postgres --command "DROP DATABASE futureporn_db;"
@@ -26,6 +27,6 @@ kubectl -n futureporn exec postgres -- psql -U postgres --command "\
 # kubectl exec -i POD_NAME -- pg_restore -U USERNAME -C -d DATABASE < dump.sql
-kubectl -n futureporn cp /home/cj/Documents/futureporn-meta/backups/2024-06-18_20-35-38-futureporn-db.psql postgres:/tmp/db.psql
+kubectl -n futureporn cp /home/cj/Documents/futureporn-meta/backups/$dbname postgres:/tmp/db.psql
 kubectl -n futureporn exec -i postgres -- pg_restore -U postgres -d futureporn_db /tmp/db.psql
 # kubectl -n futureporn exec -ti db-postgresql-0 -- rm /tmp/db.psql