progress
ci / build (push): Failing after 8s

Chris Grimmett 2024-04-20 22:34:14 +00:00
parent 6ea65301c4
commit 61d9010138
21 changed files with 170 additions and 484 deletions

View File

@@ -1,15 +1,20 @@
 include .env
-all: minikube secrets tilt
+dev: minikube secrets tilt
+all: bootstrap secrets helmsman
 bootstrap:
-    helm install --create-namespace -n crd-bootstrap crd-bootstrap oci://ghcr.io/skarlso/helm/crd-bootstrap --version v0.6.0
+    kubectl --kubeconfig /home/chris/.kube/vke.yaml apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.crds.yaml
+    kubectl create namespace argocd
+    kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
 helmsman:
     helmsman --apply -f ./helmsman.yaml
-deploy: bootstrap helmsman secrets
+deploy: helmsman secrets
 tilt:
     tilt up
@@ -19,8 +24,12 @@ secrets:
     kubectl create secret generic link2cid \
        --from-literal=apiKey=${LINK2CID_API_KEY}
-    kubectl --namespace cert-manager delete secret vultr-credentials --ignore-not-found
-    kubectl --namespace cert-manager create secret generic vultr-credentials \
+    kubectl --namespace cert-manager delete secret vultr --ignore-not-found
+    kubectl --namespace cert-manager create secret generic vultr \
+       --from-literal=apiKey=${VULTR_API_KEY}
+    kubectl --namespace windmill delete secret vultr --ignore-not-found
+    kubectl --namespace windmill create secret generic vultr \
        --from-literal=apiKey=${VULTR_API_KEY}
     kubectl delete secret vultr --ignore-not-found

View File

@@ -1,13 +0,0 @@
apiVersion: delivery.crd-bootstrap/v1alpha1
kind: Bootstrap
metadata:
namespace: cert-manager
name: cert-manager-crd-bootstrap
spec:
interval: 10s
source:
helm:
chartReference: https://charts.jetstack.io
chartName: cert-manager
version:
semver: 1.14.4

View File

@@ -51,7 +51,7 @@ spec:
       - name: external-dns
         image: registry.k8s.io/external-dns/external-dns:v0.14.1
         args:
-        - --source=service
+        - --source=ingress
        - --domain-filter=sbtp.xyz
        - --provider=vultr
        env:

View File

@@ -19,5 +19,4 @@ spec:
       protocol: TCP
       port: 5001
       targetPort: 5001
-status:
-  loadBalancer: {}

View File

@@ -0,0 +1,47 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-webhook-vultr-secret-reader
namespace: cert-manager
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook-vultr-secret-reader-binding
namespace: cert-manager
subjects:
- kind: ServiceAccount
name: cert-manager-webhook-vultr
namespace: cert-manager
roleRef:
kind: Role
name: cert-manager-webhook-vultr-secret-reader
apiGroup: rbac.authorization.k8s.io
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# server: https://acme-staging-v02.api.letsencrypt.org/directory
server: https://acme-v02.api.letsencrypt.org/directory
email: {{ .Values.adminEmail }}
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- dns01:
webhook:
groupName: acme.vultr.com
solverName: vultr
config:
apiKeySecretRef:
key: apiKey
name: vultr

View File

@@ -1,50 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# You must replace this email address with your own.
# Let's Encrypt will use this to contact you about expiring
# certificates, and issues related to your account.
email: {{ .Values.adminEmail }}
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: letsencrypt-staging
solvers:
- dns01:
webhook:
groupName: acme.vultr.com
solverName: vultr
config:
apiKeySecretRef:
key: apiKey
name: vultr-credentials
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-webhook-vultr:secret-reader
namespace: cert-manager
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["vultr-credentials"]
verbs: ["get", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook-vultr:secret-reader
namespace: cert-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cert-manager-webhook-vultr:secret-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-vultr

View File

@@ -1,51 +1,20 @@
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: link2cid
-#   annotations:
-#     external-dns.alpha.kubernetes.io/hostname: link2cid.sbtp.xyz
-#     service.beta.kubernetes.io/vultr-loadbalancer-label: "link2cid"
-#     service.beta.kubernetes.io/vultr-loadbalancer-ssl: "cert-manager-webhook-vultr-webhook-tls"
-#     service.beta.kubernetes.io/vultr-loadbalancer-protocol: "https"
-#     service.beta.kubernetes.io/vultr-loadbalancer-https-ports: "443"
-#     service.beta.kubernetes.io/vultr-loadbalancer-backend-protocol: "http"
-#     service.beta.kubernetes.io/vultr-loadbalancer-algorithm: "least_connections"
-#     service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-protocol: "http"
-#     service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-path: "/health"
-# spec:
-#   selector:
-#     app: link2cid
-#   type: LoadBalancer
-#   ports:
-#     - name: http
-#       protocol: TCP
-#       port: 80
-#       targetPort: 3939
-#     - name: https
-#       protocol: TCP
-#       port: 443
-#       targetPort: 3939
-apiVersion: networking.k8s.io/v1
-kind: Ingress
+apiVersion: v1
+kind: Service
 metadata:
-  name: link2cid-ingress
-  annotations:
-    nginx.ingress.kubernetes.io/rewrite-target: /
+  name: link2cid
 spec:
-  ingressClassName: link2cid-ingress
-  rules:
-  - host: link2cid.sbtp.xyz
-    http:
-      paths:
-      - path: /demo-path
-        pathType: Prefix
-        backend:
-          service:
-            name: link2cid
-            port:
-              number: 3939
+  selector:
+    app: link2cid
+  ports:
+    - name: http
+      protocol: TCP
+      port: 80
+      targetPort: 3939
+    - name: https
+      protocol: TCP
+      port: 443
+      targetPort: 3939
 ---
 apiVersion: apps/v1
@@ -67,41 +36,6 @@ spec:
       ports:
         - containerPort: 3939
-# ---
-# apiVersion: apps/v1
-# kind: Deployment
-# metadata:
-#   name: link2cid
-# spec:
-#   selector:
-#     matchLabels:
-#       app: link2cid
-#   template:
-#     metadata:
-#       labels:
-#         app: link2cid
-#     spec:
-#       containers:
-#         - name: link2cid
-#           image: {{ .Values.link2cid.containerName }}
-#           ports:
-#             - containerPort: 3333
-#           env:
-#             - name: IPFS_URL
-#               value: http://ipfs-service:5001
-#             - name: PORT
-#               value: "3333"
-#             - name: API_KEY
-#               valueFrom:
-#                 secretKeyRef:
-#                   name: link2cid
-#                   key: apiKey
-#           resources:
-#             limits:
-#               cpu: 500m
-#               memory: 1024Mi
-#       restartPolicy: Always
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
@@ -123,20 +57,36 @@ spec:
 {{ if eq .Values.managedBy "Helm" }}
 ---
-apiVersion: cert-manager.io/v1
-kind: Certificate
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
-  name: staging-cert-sbtp-xyz
-  namespace: default
+  name: link2cid-ingress
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.ingress.kubernetes.io/ssl-redirect: "true"
+    cert-manager.io/cluster-issuer: "letsencrypt-prod"
 spec:
-  commonName: link2cid.sbtp.xyz
-  dnsNames:
-    - link2cid.sbtp.xyz
-  issuerRef:
-    name: letsencrypt-staging
-    kind: ClusterIssuer
-  secretName: sbtp-xyz-tls
+  ingressClassName: nginx
   secretTemplate:
     annotations:
       reflector.v1.k8s.emberstack.com/reflection-allowed: "true"
-      reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "default"
+      reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: ""
+  tls:
+    - hosts:
+        - link2cid.sbtp.xyz
+      secretName: link2cid-tls
+  rules:
+    - host: link2cid.sbtp.xyz
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: link2cid
+                port:
+                  number: 80
+---
 {{ end }}

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: next-pod
labels:
app.kubernetes.io/name: next
spec:
containers:
- name: next
image: {{ .Values.next.containerName }}
env:
- name: HOSTNAME
value: 0.0.0.0
ports:
- containerPort: 3000
resources: {}
restartPolicy: OnFailure
resources:
limits:
cpu: 500m
memory: 1Gi

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: next-service
annotations:
service.beta.kubernetes.io/vultr-loadbalancer-protocol: "http"
service.beta.kubernetes.io/vultr-loadbalancer-algorithm: "least_connections"
spec:
type: LoadBalancer
selector:
name: next
ports:
- name: http
protocol: TCP
port: 3000
targetPort: 3000

View File

@@ -1,33 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
annotations:
external-dns.alpha.kubernetes.io/hostname: nginx.sbtp.xyz
spec:
selector:
app: nginx
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 80

View File

@@ -1,35 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: pgadmin-pod
labels:
app.kubernetes.io/name: pgadmin
spec:
containers:
- name: pgadmin
image: dpage/pgadmin4
ports:
- containerPort: 5050
resources:
limits:
cpu: 500m
memory: 1Gi
env:
- name: PGADMIN_LISTEN_PORT
value: '5050'
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgres
key: password
- name: PGADMIN_DEFAULT_PASSWORD
valueFrom:
secretKeyRef:
name: pgadmin
key: defaultPassword
- name: PGADMIN_DEFAULT_EMAIL
valueFrom:
secretKeyRef:
name: pgadmin
key: defaultEmail
restartPolicy: OnFailure

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: pgadmin-service
spec:
selector:
app.kubernetes.io/name: pgadmin
ports:
- name: web
protocol: TCP
port: 5050
targetPort: 5050
status:
loadBalancer: {}

View File

@@ -1,30 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: postgres-pod
labels:
app.kubernetes.io/name: postgres
spec:
containers:
- name: postgres
image: postgres:16.0
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgres
key: password
ports:
- containerPort: 5432
resources:
limits:
cpu: 500m
memory: 1Gi
volumeMounts:
- name: postgres-pvc
mountPath: /data/postgres
restartPolicy: OnFailure
volumes:
- name: postgres-pvc
persistentVolumeClaim:
claimName: postgres-pvc

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres-pvc
annotations:
meta.helm.sh/release-name: fp
meta.helm.sh/release-namespace: default
labels:
app.kubernetes.io/managed-by: {{ .Values.managedBy }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 40Gi
storageClassName: {{ .Values.storageClassName }}

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: postgres-service
spec:
selector:
app.kubernetes.io/name: postgres
ports:
- name: db
protocol: TCP
port: 5432
targetPort: 5432
status:
loadBalancer: {}

View File

@@ -1,108 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: strapi-pod
spec:
containers:
- name: strapi-pod
image: {{ .Values.strapi.containerName }}
ports:
- containerPort: 1337
env:
- name: ADMIN_JWT_SECRET
valueFrom:
secretKeyRef:
name: strapi
key: adminJwtSecret
- name: API_TOKEN_SALT
valueFrom:
secretKeyRef:
name: strapi
key: apiTokenSalt
- name: APP_KEYS
valueFrom:
secretKeyRef:
name: strapi
key: appKeys
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: strapi
key: databaseUrl
- name: CDN_BUCKET_USC_URL
valueFrom:
secretKeyRef:
name: strapi
key: cdnBucketUscUrl
- name: DATABASE_CLIENT
value: postgres
- name: DATABASE_HOST
value: postgres-service
- name: DATABASE_NAME
value: futureporn-strapi
- name: JWT_SECRET
valueFrom:
secretKeyRef:
name: strapi
key: jwtSecret
- name: MUX_PLAYBACK_RESTRICTION_ID
valueFrom:
secretKeyRef:
name: strapi
key: muxPlaybackRestrictionId
- name: MUX_SIGNING_KEY_ID
valueFrom:
secretKeyRef:
name: strapi
key: muxSigningKeyId
- name: MUX_SIGNING_KEY_PRIVATE_KEY
valueFrom:
secretKeyRef:
name: strapi
key: muxSigningKeyPrivateKey
- name: NODE_ENV
value: production
- name: S3_USC_BUCKET_APPLICATION_KEY
valueFrom:
secretKeyRef:
name: strapi
key: s3UscBucketApplicationKey
- name: S3_USC_BUCKET_ENDPOINT
valueFrom:
secretKeyRef:
name: strapi
key: s3UscBucketEndpoint
- name: S3_USC_BUCKET_KEY_ID
valueFrom:
secretKeyRef:
name: strapi
key: s3UscBucketKeyId
- name: S3_USC_BUCKET_NAME
valueFrom:
secretKeyRef:
name: strapi
key: s3UscBucketName
- name: S3_USC_BUCKET_REGION
valueFrom:
secretKeyRef:
name: strapi
key: s3UscBucketRegion
- name: SENDGRID_API_KEY
valueFrom:
secretKeyRef:
name: strapi
key: sendgridApiKey
- name: STRAPI_URL
value: {{ .Values.strapi.url }}
- name: TRANSFER_TOKEN_SALT
valueFrom:
secretKeyRef:
name: strapi
key: transferTokenSalt
- name: PORT
value: "{{ .Values.strapi.port }}"
resources:
limits:
cpu: 500m
memory: 1Gi
restartPolicy: OnFailure

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: strapi-service
spec:
selector:
app.kubernetes.io/name: strapi
ports:
- name: web
protocol: TCP
port: 80
targetPort: 1337
status:
loadBalancer: {}

View File

@@ -1,3 +1,3 @@
-windmill helm chart is pulled in via ../../helmfile.yaml.
+windmill helm chart is pulled in via ../../helmsman.yaml.
 This folder is here just to hold our values.yaml file for configuring windmill.

View File

@@ -1,8 +1,8 @@
 # windmill root values block
 windmill:
   # domain as shown in browser, this is used together with `baseProtocol` as part of the BASE_URL environment variable in app and worker container and in the ingress resource, if enabled
-  baseDomain: windmill
-  baseProtocol: http
+  baseDomain: windmill2.sbtp.xyz
+  baseProtocol: https
   # postgres URI, pods will crashloop if database is unreachable, sets DATABASE_URL environment variable in app and worker container
   databaseUrl: postgres://postgres:windmill@windmill-postgresql/windmill?sslmode=disable
   # replica for the application app
@@ -72,14 +72,23 @@ windmill:
   # enable postgres (bitnami) on kubernetes
   postgresql:
     enabled: true
+    primary:
+      persistence:
+        size: 40Gi
   # enable minio (bitnami) on kubernetes
   minio:
     enabled: false
   # Configure Ingress
-  # ingress:
-  #   className: ""
+  ingress:
+    className: "nginx"
+    annotations:
+      nginx.ingress.kubernetes.io/ssl-redirect: "true"
+      cert-manager.io/cluster-issuer: "letsencrypt-prod"
+    tls:
+      - hosts:
+          - windmill2.sbtp.xyz
+        secretName: windmill-tls
   # enable enterprise features
   enterprise:

View File

@@ -1,20 +1,44 @@
-# repositories:
-# - name: jetstack
-#   url: https://charts.jetstack.io
-# - name: vultr
-#   url: https://vultr.github.io/helm-charts
+repositories:
+  - name: jetstack
+    url: https://charts.jetstack.io
+  - name: vultr
+    url: https://vultr.github.io/helm-charts
+  - name: emberstack
+    url: https://emberstack.github.io/helm-charts
 releases:
-# - name: cert-manager
-#   namespace: cert-manager
-#   chart: jetstack/cert-manager
+  - name: reflector
+    namespace: default
+    chart: emberstack/reflector
+  - name: cert-manager
+    namespace: cert-manager
+    chart: jetstack/cert-manager
+    set:
+      - name: installCRDs
+        value: true
+    values:
+      - charts/fp/values-prod.yaml
+  - name: cert-manager-webhook-vultr
+    namespace: cert-manager
+    chart: vultr/cert-manager-webhook-vultr
+    dependencies:
+      - version: ~v1.14.4
+        chart: jetstack/cert-manager
+    needs:
+      - cert-manager/cert-manager
   - name: fp
     namespace: default
     chart: charts/fp
     values:
       - charts/fp/values-prod.yaml
+    dependencies:
+      - version: ~v1.14.4
+        chart: jetstack/cert-manager
+    needs:
+      - cert-manager/cert-manager

View File

@ -1,16 +1,31 @@
namespaces: namespaces:
default: default:
cert-manager: cert-manager:
crd-bootstrap:
ingress-nginx: ingress-nginx:
metrics-server:
kcert:
windmill:
helmRepos: helmRepos:
jetstack: https://charts.jetstack.io jetstack: https://charts.jetstack.io
emberstack: https://emberstack.github.io/helm-charts emberstack: https://emberstack.github.io/helm-charts
vultr: https://vultr.github.io/helm-charts vultr: https://vultr.github.io/helm-charts
ingress-nginx: https://kubernetes.github.io/ingress-nginx ingress-nginx: https://kubernetes.github.io/ingress-nginx
metrics-server: https://kubernetes-sigs.github.io/metrics-server
windmill: https://windmill-labs.github.io/windmill-helm-charts
apps: apps:
windmill:
namespace: windmill
chart: "windmill/windmill"
enabled: true
version: "2.0.167"
valuesFile: "./charts/windmill/values.yaml"
metrics-server:
namespace: metrics-server
chart: "metrics-server/metrics-server"
enabled: true
version: "3.12.1"
ingress-nginx: ingress-nginx:
namespace: ingress-nginx namespace: ingress-nginx
chart: "ingress-nginx/ingress-nginx" chart: "ingress-nginx/ingress-nginx"
@@ -32,8 +47,6 @@ apps:
     chart: "jetstack/cert-manager"
     enabled: true
     version: "1.14.4"
-    set:
-      installCRDs: true
   reflector:
     namespace: "default"
     chart: "emberstack/reflector"