separate staging from prod

parent ad1d81b75a
commit 92a77d0876

Makefile | 11 lines changed
Makefile — one bootstrap target per environment:

@@ -7,12 +7,17 @@ namespaces:
 secrets:
 	./scripts/k8s-secrets.sh
 
-flux:
-	./scripts/flux-bootstrap.sh
+flux-prod:
+	./scripts/flux-bootstrap-prod.sh
+
+flux-staging:
+	./scripts/flux-bootstrap-staging.sh
 
 dev: kind namespaces secrets chisel velero
 
-prod: namespaces secrets velero flux
+prod: namespaces secrets velero flux-prod
+
+staging: namespaces secrets velero flux-staging
 
 velero:
 	./scripts/velero-create.sh
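With the environments split, each one is brought up by its own target. A usage sketch, assuming make is run from the repo root:

    make staging   # namespaces, secrets, velero, then ./scripts/flux-bootstrap-staging.sh
    make prod      # same chain, but ends with ./scripts/flux-bootstrap-prod.sh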
Shared fp HelmRelease values — the environment-specific strapi settings are removed here and move into the per-cluster fp-values.yaml files below:

@@ -42,9 +42,6 @@ spec:
-      url: https://strapi.piko.sbtp.xyz
-      certIssuer: letsencrypt-staging
-      hostname: strapi.futureporn.svc.cluster.local
       ingressClassName: ngrok
       ngrok:
         hostname: grateful-engaging-cicada.ngrok-free.app
     realtime:
       imageName: gitea.futureporn.net/futureporn/realtime:latest
       adminEmail: cj@futureporn.net
New fp-values.yaml for the production cluster:

@@ -0,0 +1,17 @@
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+  name: fp
+  namespace: futureporn
+spec:
+  values:
+    next:
+      certIssuer: letsencrypt-prod
+      hostname: next.futureporn.net
+    scout:
+      certIssuer: letsencrypt-prod
+      cdnBucketUrl: https://futureporn-b2.b-cdn.net
+      s3BucketName: futureporn-b2
+    strapi:
+      url: https://portal.futureporn.net
+      hostname: portal.futureporn.net
Production kustomization — podinfo swapped out for fp:

@@ -1,11 +1,11 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 namespace: futureporn
 resources:
-  - ../base/podinfo
   - ../base/temporal
+  - ../base/fp
 patches:
-  - path: podinfo-values.yaml
+  - path: fp-values.yaml
    target:
      kind: HelmRelease
-      name: podinfo
+      name: fp
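A quick way to confirm the patch lands on the intended HelmRelease is to render the overlay locally; a sketch, assuming the overlay directory is clusters/production:

    kubectl kustomize clusters/production | grep -B2 -A8 'name: fp'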
Production podinfo-values.yaml, deleted:

@@ -1,21 +0,0 @@
-apiVersion: helm.toolkit.fluxcd.io/v2
-kind: HelmRelease
-metadata:
-  name: podinfo
-  namespace: futureporn
-spec:
-  chart:
-    spec:
-      version: ">=1.0.0"
-  values:
-    ingress:
-      hosts:
-        - host: podinfo.sbtp.xyz
-          paths:
-            - path: /
-              pathType: Prefix
-          backend:
-            service:
-              name: podinfo
-              port:
-                number: 9898
chisel-operator GitRepository and Kustomization, deleted:

@@ -1,25 +0,0 @@
-
-apiVersion: source.toolkit.fluxcd.io/v1beta2
-kind: GitRepository
-metadata:
-  name: chisel-operator
-  namespace: futureporn
-spec:
-  interval: 5m
-  url: https://github.com/FyraLabs/chisel-operator
-  ref:
-    branch: master
----
-apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
-kind: Kustomization
-metadata:
-  name: chisel-operator
-  namespace: futureporn
-spec:
-  interval: 10m
-  targetNamespace: futureporn
-  sourceRef:
-    kind: GitRepository
-    name: chisel-operator
-  path: "./kustomize"
-  prune: true
The kustomization that referenced chisel.yaml, deleted:

@@ -1,5 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-namespace: futureporn
-resources:
-  - chisel.yaml
New fp-values.yaml for the staging cluster:

@@ -0,0 +1,17 @@
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+  name: fp
+  namespace: futureporn
+spec:
+  values:
+    next:
+      certIssuer: letsencrypt-staging
+      hostname: next.sbtp.xyz
+    scout:
+      certIssuer: letsencrypt-staging
+      cdnBucketUrl: https://fp-dev.b-cdn.net
+      s3BucketName: fp-dev
+    strapi:
+      url: https://strapi.sbtp.xyz
+      hostname: strapi.sbtp.xyz
Staging kustomization — the fp base added, with explicit patch targets:

@@ -3,7 +3,14 @@ kind: Kustomization
 namespace: futureporn
 resources:
   - ../base/podinfo
   - ../base/temporal
+  - ../base/fp
 patches:
   - path: podinfo-values.yaml
+    target:
+      kind: HelmRelease
+      name: podinfo
+  - path: fp-values.yaml
+    target:
+      kind: HelmRelease
+      name: fp
Staging podinfo values — hostname corrected:

@@ -12,7 +12,7 @@ spec:
   values:
     ingress:
       hosts:
-        - host: podinfo.staging
+        - host: podinfo.sbtp.xyz
           paths:
             - path: /
               pathType: ImplementationSpecific
apps Flux Kustomization — faster reconcile interval:

@@ -4,7 +4,7 @@ metadata:
   name: apps
   namespace: flux-system
 spec:
-  interval: 10m0s
+  interval: 1m0s
   dependsOn:
     - name: infra-configs
   sourceRef:
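At 1m0s, Flux re-checks the apps Kustomization every minute instead of every ten. A reconcile can also be forced immediately rather than waiting for the next tick:

    flux reconcile kustomization apps --with-source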
Infrastructure controllers Kustomization — wait for resources to become ready before dependents reconcile:

@@ -14,6 +14,7 @@ spec:
   path: ./infrastructure/controllers
   prune: true
+  wait: true
 
 ---
 apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization
Let's Encrypt issuer — real contact email in place of the fluxcd example default:

@@ -6,7 +6,7 @@ metadata:
 spec:
   acme:
     # Replace the email address with your own contact email
-    email: fluxcdbot@users.noreply.github.com
+    email: cj@futureporn.net
     # The server is replaced in /clusters/production/infrastructure.yaml
     server: https://acme-staging-v02.api.letsencrypt.org/directory
     privateKeySecretRef:
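This issuer still points at the Let's Encrypt staging endpoint, which hands out untrusted test certificates; per the comment, the server URL is swapped for production elsewhere. To check that the ACME account registered with the new email (assuming the resource is a ClusterIssuer named letsencrypt-staging):

    kubectl describe clusterissuer letsencrypt-staging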
cert-manager.yaml — blank lines added between YAML documents:

@@ -3,6 +3,7 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: cert-manager
+
 ---
 apiVersion: source.toolkit.fluxcd.io/v1
 kind: HelmRepository
@@ -12,6 +13,7 @@ metadata:
 spec:
   interval: 24h
   url: https://charts.jetstack.io
+
 ---
 apiVersion: helm.toolkit.fluxcd.io/v2
 kind: HelmRelease
New package.json for the scripts workspace:

@@ -0,0 +1,12 @@
+{
+  "name": "scripts",
+  "type": "module",
+  "version": "1.0.0",
+  "description": "",
+  "keywords": [],
+  "author": "",
+  "license": "Unlicense",
+  "dependencies": {
+    "dotenv": "^16.4.5"
+  }
+}
New pnpm-lock.yaml for the scripts workspace:

@@ -0,0 +1,23 @@
+lockfileVersion: '9.0'
+
+settings:
+  autoInstallPeers: true
+  excludeLinksFromLockfile: false
+
+importers:
+
+  .:
+    dependencies:
+      dotenv:
+        specifier: ^16.4.5
+        version: 16.4.5
+
+packages:
+
+  dotenv@16.4.5:
+    resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
+    engines: {node: '>=12'}
+
+snapshots:
+
+  dotenv@16.4.5: {}
New Node script that deletes orphaned Vultr load balancers and block storage:

@@ -0,0 +1,84 @@
+#!/usr/bin/env node
+
+// Clean up Vultr resources left behind by deleted clusters.
+import dotenv from 'dotenv'
+dotenv.config({ path: '../../.env' })
+
+const apiV2Base = 'https://api.vultr.com/v2'
+
+if (!process.env.VULTR_API_KEY) throw new Error('VULTR_API_KEY is missing in env');
+
+// Delete load balancers that have no instances attached.
+async function deleteOrphanedLoadBalancers() {
+  console.log('getting load balancers')
+  const loadBalancersRes = await fetch(`${apiV2Base}/load-balancers`, {
+    method: 'GET',
+    headers: {
+      'authorization': `Bearer ${process.env.VULTR_API_KEY}`
+    }
+  })
+  const loadBalancerJson = await loadBalancersRes.json()
+  const orphanedLoadBalancers = loadBalancerJson.load_balancers.filter((lb) => (lb.instances.length === 0))
+  console.log(`found ${orphanedLoadBalancers.length} orphaned load balancers.`)
+  console.log('waiting 1 second')
+  await new Promise((resolve) => { setTimeout(resolve, 1000) })
+
+  for (const lb of orphanedLoadBalancers) {
+    console.log(`deleting load balancer ${lb.id}`)
+    await fetch(`${apiV2Base}/load-balancers/${lb.id}`, {
+      method: 'DELETE',
+      headers: {
+        'Authorization': `Bearer ${process.env.VULTR_API_KEY}`
+      }
+    })
+    // Pause between DELETE calls to stay under the API rate limit.
+    console.log('waiting 1 second')
+    await new Promise((resolve) => { setTimeout(resolve, 1000) })
+  }
+}
+
+// Delete block storage volumes not attached to any instance.
+async function deleteOrphanedBlockStorage() {
+  console.log('getting block storage')
+  const blocksRes = await fetch(`${apiV2Base}/blocks`, {
+    method: 'GET',
+    headers: {
+      'authorization': `Bearer ${process.env.VULTR_API_KEY}`
+    }
+  })
+  const blocksJson = await blocksRes.json()
+  const orphanedBlocks = blocksJson.blocks.filter((b) => b.attached_to_instance === '')
+  console.log(`found ${orphanedBlocks.length} orphaned block storages`)
+  console.log('waiting 1 second')
+  await new Promise((resolve) => { setTimeout(resolve, 1000) })
+
+  for (const block of orphanedBlocks) {
+    console.log(`deleting block ${block.id}`)
+    await fetch(`${apiV2Base}/blocks/${block.id}`, {
+      method: 'DELETE',
+      headers: {
+        'Authorization': `Bearer ${process.env.VULTR_API_KEY}`
+      }
+    })
+    console.log('waiting 1 second')
+    await new Promise((resolve) => { setTimeout(resolve, 1000) })
+  }
+}
+
+async function main() {
+  await deleteOrphanedLoadBalancers()
+  await deleteOrphanedBlockStorage()
+}
+
+main()
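A usage sketch for the cleanup script. The filename below is illustrative since the diff doesn't show it, but dotenv's ../../.env path implies it runs from two levels below the repo root, i.e. inside the scripts package:

    cd scripts
    pnpm install
    node vultr-delete-orphaned-resources.js   # expects VULTR_API_KEY in ../../.env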
New scripts/flux-bootstrap-staging.sh:

@@ -0,0 +1,17 @@
+#!/bin/bash
+
+## this way is annoying because deployment asks for git password
+# flux bootstrap git \
+#   --kubeconfig /home/cj/.kube/vke.yaml \
+#   --url=https://gitea.futureporn.net/futureporn/fp.git \
+#   --branch=main \
+#   --username=cj_clippy \
+#   --token-auth=true \
+#   --path=clusters/staging
+
+## this way is more automatic although it does ask for yes/no confirmation that the ssh key has repo access
+flux bootstrap git \
+  --url="ssh://git@gitea.futureporn.net:2222/futureporn/fp" \
+  --branch=main \
+  --path="clusters/staging" \
+  --private-key-file=/home/cj/.ssh/fp-flux
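The Makefile's prod target calls ./scripts/flux-bootstrap-prod.sh, which isn't shown in this diff; presumably it is the same command pointed at the production path. A sketch under that assumption:

    flux bootstrap git \
      --url="ssh://git@gitea.futureporn.net:2222/futureporn/fp" \
      --branch=main \
      --path="clusters/production" \
      --private-key-file=/home/cj/.ssh/fp-flux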
Postgres restore script — backup filename pulled out into a variable:

@@ -1,5 +1,6 @@
 #!/bin/bash
 
+dbname=2024-06-19_22-24-03-futureporn-db.psql
 
 ## drop futureporn_db
 kubectl -n futureporn exec postgres -- psql -U postgres --command "DROP DATABASE futureporn_db;"
@@ -26,6 +27,6 @@ kubectl -n futureporn exec postgres -- psql -U postgres --command "\
 # kubectl exec -i POD_NAME -- pg_restore -U USERNAME -C -d DATABASE < dump.sql
 
 
-kubectl -n futureporn cp /home/cj/Documents/futureporn-meta/backups/2024-06-18_20-35-38-futureporn-db.psql postgres:/tmp/db.psql
+kubectl -n futureporn cp /home/cj/Documents/futureporn-meta/backups/$dbname postgres:/tmp/db.psql
 kubectl -n futureporn exec -i postgres -- pg_restore -U postgres -d futureporn_db /tmp/db.psql
 # kubectl -n futureporn exec -ti db-postgresql-0 -- rm /tmp/db.psql
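After the restore finishes, the tables can be listed to confirm the dump loaded, reusing the pod and database names from the script:

    kubectl -n futureporn exec postgres -- psql -U postgres -d futureporn_db --command '\dt'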