progress

CJ_Clippy 2024-06-28 15:23:04 -08:00
parent ace373283b
commit 14bc633237
20 changed files with 551 additions and 134 deletions


@ -8,15 +8,19 @@ namespaces:
secrets:
dotenvx run -f .env.$(ENV) -- ./scripts/k8s-secrets.sh
flux:
./scripts/flux-bootstrap.sh
dev: kind namespaces secrets chisel velero
prod: export ENV=production namespaces secrets velero chisel flux
prod: echoenv namespaces secrets velero chisel flux
staging: export ENV=staging namespaces secrets velero chisel flux
staging: echoenv namespaces secrets velero flux
echoenv:
echo "[echoenv] Using ${ENV} environment. If this is not what you want, export ENV=development|staging|production"
scrap: namespaces secrets velero chisel flux
velero:
./scripts/velero-create.sh
@ -25,6 +29,9 @@ tilt:
kind get kubeconfig > ~/.kube/kind.yaml
KUBECONFIG=~/.kube/kind.yaml tilt up -f ./t.wip.tiltfile
exoscale:
kubectl apply -f https://raw.githubusercontent.com/exoscale/cert-manager-webhook-exoscale/master/deploy/exoscale-webhook-kustomize/deploy.yaml
kind:
./scripts/kind-with-local-registry.sh
@ -36,7 +43,7 @@ clean:
dotenvx run -f .env.$(ENV) -- node ./packages/infra/vultr-delete-orphaned-resources.js
deps:
echo "Some of the install methods for these dependencies are not cross-platform compatible. Some of the install methods are not well-tested. Expect this to fail. Please consult the Makefile for URLs to project sources."
echo "Some of the install methods for these dependencies are not cross-platform compatible. Some of the install methods are not tested. Expect this to fail. Please consult the Makefile for URLs to project sources."
sudo pamac install make entr nvm kubectl docker helm expect
curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
@ -48,7 +55,9 @@ deps:
newgrp docker
curl -OL 'https://github.com/vmware-tanzu/velero/releases/download/v1.13.2/velero-v1.13.2-linux-amd64.tar.gz'
npm install -g @dotenvx/dotenvx
OS=$(go env GOOS); ARCH=$(go env GOARCH); curl -fsSL -o cmctl https://github.com/cert-manager/cmctl/releases/latest/download/cmctl_${OS}_${ARCH}
chmod +x cmctl
sudo mv cmctl /usr/local/bin
# A gitea act runner which runs locally
# https://docs.gitea.com/next/usage/actions/overview
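For reference, a minimal sketch of how these targets are intended to be driven after this change (assuming dotenvx is installed and a matching .env.staging file exists; echoenv only prints a reminder, so ENV must be exported by the caller):

    export ENV=staging
    make staging    # runs: echoenv namespaces secrets velero flux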


@ -21,7 +21,6 @@ spec:
imageName: gitea.futureporn.net/futureporn/link2cid:latest
next:
imageName: gitea.futureporn.net/futureporn/next:latest
certIssuer: letsencrypt-staging
hostname: next.futureporn.svc.cluster.local
ingressClassName: traefik
capture:
@ -33,7 +32,6 @@ spec:
imageName: gitea.futureporn.net/futureporn/scout-worker:latest
replicas: 1
pubsubServerUrl: https://realtime.futureporn.svc.cluster.local/faye
certIssuer: letsencrypt-staging
hostname: next.futureporn.svc.cluster.local
cdnBucketUrl: https://fp-dev.b-cdn.net
s3BucketName: fp-dev
@ -41,7 +39,6 @@ spec:
imageName: gitea.futureporn.net/futureporn/strapi:latest
port: 1339
url: https://strapi.piko.sbtp.xyz
certIssuer: letsencrypt-staging
hostname: strapi.futureporn.svc.cluster.local
ingressClassName: traefik
realtime:
@ -49,3 +46,5 @@ spec:
adminEmail: cj@futureporn.net
chisel:
exitNodeIp: "155.138.254.201"
certManager:
issuer: letsencrypt-staging


@ -33,7 +33,7 @@ spec:
traefik.ingress.kubernetes.io/router.tls.domains.0.main: podinfo.sbtp.xyz
traefik.ingress.kubernetes.io/router.entrypoints: http
traefik.ingress.kubernetes.io/service.nativelb: podinfo
cert-manager.io/cluster-issuer: "letsencrypt"
cert-manager.io/cluster-issuer: "letsencrypt-staging"
# ingress.kubernetes.io/ssl-redirect: "true"
hosts:
- host: podinfo.sbtp.xyz


@ -9,9 +9,10 @@ spec:
certIssuer: letsencrypt-prod
hostname: next.futureporn.net
scout:
certIssuer: letsencrypt-prod
cdnBucketUrl: https://futureporn-b2.b-cdn.net
s3BucketName: futureporn-b2
strapi:
url: https://portal.futureporn.net
hostname: portal.futureporn.net
certManager:
issuer: letsencrypt-prod


@ -6,13 +6,13 @@ metadata:
spec:
values:
next:
certIssuer: letsencrypt-staging
hostname: next.sbtp.xyz
scout:
certIssuer: letsencrypt-staging
cdnBucketUrl: https://fp-dev.b-cdn.net
s3BucketName: fp-dev
strapi:
url: https://strapi.sbtp.xyz
hostname: strapi.sbtp.xyz
managedBy: Helm
certManager:
issuer: letsencrypt-staging


@ -0,0 +1,127 @@
## Roles to deal with the following error:
## exoscale.acme.exoscale.com is forbidden: User "system:serviceaccount:cert-manager:cert-manager" cannot create resource "exoscale"
## cannot create resource "exoscale" in API group "acme.exoscale.com" at the cluster scope
# I don't think I need the following commented YAML; the problem I was seeing was actually caused by the missing cert-manager-webhook-exoscale deployment.
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: ClusterRole
# metadata:
# name: cert-manager-webhook-exoscale
# namespace: cert-manager
# labels:
# app: webhook
# rules:
# - apiGroups:
# - acme.exoscale.com
# resources:
# - '*'
# verbs:
# - 'create'
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: ClusterRoleBinding
# metadata:
# name: cert-manager-webhook-exoscale
# namespace: cert-manager
# labels:
# app: webhook
# roleRef:
# apiGroup: rbac.authorization.k8s.io
# kind: ClusterRole
# name: cert-manager-webhook-exoscale
# subjects:
# - apiGroup: ""
# kind: ServiceAccount
# name: cert-manager
# namespace: cert-manager
## Role & RoleBinding to give cert-manager access to the exoscale secret
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-webhook-exoscale:secret-reader
namespace: cert-manager
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["exoscale"]
verbs: ["get", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook-exoscale:secret-reader
namespace: cert-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cert-manager-webhook-exoscale:secret-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-exoscale
---
{{ if ne .Values.environment "production" }}
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# You must replace this email address with your own.
# Let's Encrypt will use this to contact you about expiring
# certificates, and issues related to your account.
email: {{ .Values.adminEmail }}
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: letsencrypt-staging
solvers:
- dns01:
webhook:
groupName: acme.exoscale.com
solverName: exoscale
config:
apiKeyRef:
name: exoscale
key: apiKey
apiSecretRef:
name: exoscale
key: apiSecret
{{ end }}
{{ if eq .Values.environment "production" }}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# server: https://acme-staging-v02.api.letsencrypt.org/directory
server: https://acme-v02.api.letsencrypt.org/directory
email: {{ .Values.adminEmail }}
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- dns01:
webhook:
groupName: acme.exoscale.com
solverName: exoscale
config:
apiKeyRef:
name: exoscale
key: apiKey
apiSecretRef:
name: exoscale
key: apiSecret
{{ end }}
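A hedged way to sanity-check the rendered issuer and the secret-reader RBAC above once applied (the --as identity assumes the ServiceAccount referenced by the RoleBinding above):

    kubectl describe clusterissuer letsencrypt-staging    # ACME account should report Registered
    kubectl auth can-i get secrets/exoscale -n cert-manager \
      --as=system:serviceaccount:cert-manager:cert-manager-webhook-exoscale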


@ -28,7 +28,7 @@ metadata:
namespace: futureporn
annotations:
external-dns.alpha.kubernetes.io/hostname: "{{ .Values.echo.hostname }}"
# chisel-operator.io/exit-node-name: "echo-exit-node"
chisel-operator.io/exit-node-name: "echo-exit-node"
spec:
selector:
app: echo
@ -59,7 +59,7 @@ metadata:
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
traefik.ingress.kubernetes.io/router.tls: "false"
traefik.ingress.kubernetes.io/router.tls.certresolver: le
# cert-manager.io/cluster-issuer: "{{ .Values.certManager.issuer }}"
spec:
rules:
- host: "{{ .Values.echo.hostname }}"
@ -72,17 +72,5 @@ spec:
name: echo
port:
number: 8001
# ---
# apiVersion: traefik.io/v1alpha1
# kind: IngressRoute
# metadata:
# name: echo
# namespace: futureporn
# spec:
# routes:
# - match: Host(`echo.sbtp.xyz`)
# kind: Rule
# services:
# - name: echo
# port: 8001
tls:
- secretName: echo-cert


@ -0,0 +1,321 @@
## copied from https://github.com/exoscale/cert-manager-webhook-exoscale
---
# Source: exoscale-webhook/templates/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: cert-manager-webhook-exoscale
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
---
# Source: exoscale-webhook/templates/rbac.yaml
# Grant cert-manager permission to validate using our apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-webhook-exoscale:domain-solver
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
rules:
- apiGroups:
- acme.exoscale.com
resources:
- '*'
verbs:
- 'create'
---
# Source: exoscale-webhook/templates/rbac.yaml
# apiserver gets the auth-delegator role to delegate auth decisions to
# the core apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-webhook-exoscale:auth-delegator
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-exoscale
namespace: cert-manager
---
# Source: exoscale-webhook/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-webhook-exoscale:domain-solver
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-webhook-exoscale:domain-solver
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager
namespace: cert-manager
---
# Source: exoscale-webhook/templates/rbac.yaml
# Grant the webhook permission to read the secrets containing the credentials
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-webhook-exoscale:secrets-reader
namespace: cert-manager
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
rules:
- apiGroups:
- ''
resources:
- 'secrets'
verbs:
- 'get'
---
# Source: exoscale-webhook/templates/rbac.yaml
# Grant the webhook permission to read the ConfigMap containing the Kubernetes
# apiserver's requestheader-ca-certificate.
# This ConfigMap is automatically created by the Kubernetes apiserver.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook-exoscale:webhook-authentication-reader
namespace: kube-system
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-exoscale
namespace: cert-manager
---
# Source: exoscale-webhook/templates/rbac.yaml
# Grant the webhook permission to read the secrets containing the credentials
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook-exoscale:secrets-reader
namespace: cert-manager
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cert-manager-webhook-exoscale:secrets-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-exoscale
namespace: cert-manager
---
# Source: exoscale-webhook/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: cert-manager-webhook-exoscale
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
type: ClusterIP
ports:
- port: 443
targetPort: https
protocol: TCP
name: https
selector:
app: exoscale-webhook
release: exoscale-webhook
---
# Source: exoscale-webhook/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cert-manager-webhook-exoscale
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
replicas:
selector:
matchLabels:
app: exoscale-webhook
release: exoscale-webhook
template:
metadata:
labels:
app: exoscale-webhook
release: exoscale-webhook
spec:
serviceAccountName: cert-manager-webhook-exoscale
containers:
- name: exoscale-webhook
image: "exoscale/cert-manager-webhook-exoscale:latest"
imagePullPolicy: IfNotPresent
args:
- --tls-cert-file=/tls/tls.crt
- --tls-private-key-file=/tls/tls.key
env:
- name: GROUP_NAME
value: "acme.exoscale.com"
- name: EXOSCALE_DEBUG
value: ""
- name: EXOSCALE_API_TRACE
value: ""
ports:
- name: https
containerPort: 443
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTPS
path: /healthz
port: https
readinessProbe:
httpGet:
scheme: HTTPS
path: /healthz
port: https
volumeMounts:
- name: certs
mountPath: /tls
readOnly: true
resources:
{}
volumes:
- name: certs
secret:
secretName: cert-manager-webhook-exoscale-webhook-tls
---
# Source: exoscale-webhook/templates/apiservice.yaml
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v1alpha1.acme.exoscale.com
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
annotations:
cert-manager.io/inject-ca-from: "cert-manager/cert-manager-webhook-exoscale-webhook-tls"
spec:
group: acme.exoscale.com
groupPriorityMinimum: 1000
versionPriority: 15
service:
name: cert-manager-webhook-exoscale
namespace: cert-manager
version: v1alpha1
---
# Source: exoscale-webhook/templates/pki.yaml
# Generate a CA Certificate used to sign certificates for the webhook
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: cert-manager-webhook-exoscale-ca
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
secretName: cert-manager-webhook-exoscale-ca
duration: 43800h # 5y
issuerRef:
name: cert-manager-webhook-exoscale-selfsign
commonName: "ca.exoscale-webhook.cert-manager"
isCA: true
---
# Source: exoscale-webhook/templates/pki.yaml
# Finally, generate a serving certificate for the webhook to use
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: cert-manager-webhook-exoscale-webhook-tls
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
secretName: cert-manager-webhook-exoscale-webhook-tls
duration: 8760h # 1y
issuerRef:
name: cert-manager-webhook-exoscale-ca
dnsNames:
- cert-manager-webhook-exoscale
- cert-manager-webhook-exoscale.cert-manager
- cert-manager-webhook-exoscale.cert-manager.svc
---
# Source: exoscale-webhook/templates/pki.yaml
# Create a selfsigned Issuer, in order to create a root CA certificate for
# signing webhook serving certificates
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: cert-manager-webhook-exoscale-selfsign
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
selfSigned: {}
---
# Source: exoscale-webhook/templates/pki.yaml
# Create an Issuer that uses the above generated CA certificate to issue certs
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: cert-manager-webhook-exoscale-ca
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
ca:
secretName: cert-manager-webhook-exoscale-ca
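A quick, hedged health check after applying the copied webhook manifest (the label selector and APIService name come from the YAML above):

    kubectl -n cert-manager get pods -l app=exoscale-webhook
    kubectl get apiservice v1alpha1.acme.exoscale.com    # AVAILABLE should be True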


@ -1,49 +0,0 @@
{{ if ne .Values.environment "production" }}
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# You must replace this email address with your own.
# Let's Encrypt will use this to contact you about expiring
# certificates, and issues related to your account.
email: {{ .Values.adminEmail }}
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: letsencrypt-staging
solvers:
- dns01:
webhook:
groupName: acme.vultr.com
solverName: vultr
config:
apiKeySecretRef:
key: apiKey
name: vultr
{{ end }}
{{ if eq .Values.environment "production" }}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# server: https://acme-staging-v02.api.letsencrypt.org/directory
server: https://acme-v02.api.letsencrypt.org/directory
email: {{ .Values.adminEmail }}
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- dns01:
webhook:
groupName: acme.vultr.com
solverName: vultr
config:
apiKeySecretRef:
key: apiKey
name: vultr
{{ end }}


@ -51,7 +51,7 @@ metadata:
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: http
traefik.ingress.kubernetes.io/router.tls: "true"
traefik.ingress.kubernetes.io/tls.certresolver: le
cert-manager.io/cluster-issuer: "{{ .Values.certManager.issuer }}"
kubernetes.io/ingress.class: traefik
external-dns.alpha.kubernetes.io/hostname: "{{ .Values.next.hostname }}"
spec:
@ -98,8 +98,8 @@ metadata:
namespace: futureporn
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
traefik.ingress.kubernetes.io/router.tls: "false"
traefik.ingress.kubernetes.io/router.tls.certresolver: le
traefik.ingress.kubernetes.io/router.tls: "true"
cert-manager.io/cluster-issuer: "{{ .Values.certManager.issuer }}"
spec:
rules:
- host: next.fp.sbtp.xyz
@ -112,3 +112,7 @@ spec:
name: next
port:
number: 3000
tls:
- hosts:
- "{{ .Values.next.hostname }}"
secretName: next-cert
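With the cluster-issuer annotation and the tls block above, cert-manager's ingress-shim is expected to create a Certificate named after the secret; a hedged check (resource names assume the values shown here):

    kubectl -n futureporn get certificate
    kubectl -n futureporn describe certificate next-cert    # wait for Ready=True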


@ -162,18 +162,14 @@ metadata:
namespace: futureporn
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web
traefik.ingress.kubernetes.io/router.entrypoints: websecure,web
traefik.ingress.kubernetes.io/router.tls: "false"
traefik.ingress.kubernetes.io/router.tls.certresolver: le
cert-manager.io/cluster-issuer: "{{ .Values.certManager.issuer }}"
spec:
ingressClassName: "{{ .Values.strapi.ingressClassName }}"
backend:
serviceName: strapi
servicePort: 1339
# tls:
# - secretName: strapi-tls
# hosts:
# - "{{ .Values.strapi.hostname }}"
tls:
- hosts:
- "{{ .Values.strapi.hostname }}"
secretName: strapi-cert
rules:
- host: "{{ .Values.strapi.hostname }}"
http:
@ -198,18 +194,3 @@ spec:
port: 9090
auth: chisel
{{ end }}
# annotations:
# spec:
# rules:
# - host: echo.sbtp.xyz
# http:
# paths:
# - path: /
# pathType: Prefix
# backend:
# service:
# name: echo
# port:
# number: 8001


@ -38,3 +38,5 @@ spec:
auth: chisel
{{ end }}


@ -5,7 +5,6 @@ link2cid:
imageName: fp/link2cid
next:
imageName: fp/next
certIssuer: letsencrypt-staging
hostname: next.fp.sbtp.xyz
ingressClassName: traefik
capture:
@ -17,7 +16,6 @@ scout:
imageName: fp/scout-worker
replicas: 1
pubsubServerUrl: https://realtime.futureporn.svc.cluster.local/faye
certIssuer: letsencrypt-staging
hostname: next.futureporn.svc.cluster.local
cdnBucketUrl: https://fp-dev.b-cdn.net
s3BucketName: fp-dev
@ -25,7 +23,6 @@ strapi:
imageName: fp/strapi
port: 1339
url: https://strapi.fp.sbtp.xyz
certIssuer: letsencrypt-staging
hostname: strapi.fp.sbtp.xyz
ingressClassName: traefik
ngrok:
@ -37,3 +34,5 @@ chisel:
exitNodeIp: "155.138.254.201"
echo:
hostname: echo.fp.sbtp.xyz
certManager:
issuer: letsencrypt-staging


@ -13,7 +13,6 @@ scout:
s3BucketName: futureporn
next:
imageName: gitea.futureporn.net/futureporn/next:latest
certIssuer: letsencrypt-staging
hostname: next.sbtp.xyz
capture:
imageName: gitea.futureporn.net/futureporn/capture:latest
@ -21,7 +20,6 @@ strapi:
imageName: sjc.vultrcr.com/fpcontainers/strapi
port: 1339
url: https://portal.futureporn.net
certIssuer: letsencrypt-prod
hostname: strapi.sbtp.xyz
ingressClassName: traefik
managedBy: Helm
@ -29,3 +27,5 @@ adminEmail: cj@futureporn.net
extraArgs:
- --dns01-recursive-nameservers-only
- --dns01-recursive-nameservers=8.8.8.8:53,1.1.1.1:53
certManager:
issuer: letsencrypt-prod


@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- github.com/exoscale/cert-manager-webhook-exoscale/deploy/exoscale-webhook-kustomize


@ -1,5 +1,3 @@
## DEPRECATED. We are using traefik to get certs
---
apiVersion: v1
kind: Namespace
@ -35,3 +33,4 @@ spec:
interval: 12h
values:
installCRDs: true


@ -2,7 +2,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- external-dns.yaml
# - cert-manager.yaml
- cert-manager.yaml
- cert-manager-webhook-exoscale.yaml
- ingress-traefik.yaml
- grafana-k8s-dashboards.yaml
- kube-prometheus-stack.yaml

scripts/flux-bootstrap.sh Normal file → Executable file

@ -1,10 +1,9 @@
#!/bin/bash
if [ -z "$(ENV)" ]; then \
if [ -z "${ENV}" ]; then \
echo "Error: ENV variable is not defined. Please set to one of development|staging|production"; exit 1; \
fi
## this way is annoying because deployment asks for git password
# flux bootstrap git \
# --kubeconfig /home/cj/.kube/vke.yaml \
# --url=https://gitea.futureporn.net/futureporn/fp.git \
@ -13,9 +12,9 @@ fi
# --token-auth=true \
# --path=clusters/production
## this way is more automatic although it does ask for yes/no confirmation that the ssh key has repo access
## --silent avoids the [Yes|no] prompt
flux bootstrap git \
--yes \
--silent \
--url="ssh://git@gitea.futureporn.net:2222/futureporn/fp" \
--branch=main \
--path="clusters/$ENV" \
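A hedged usage sketch for the updated script (assumes the SSH deploy key already has access to the repo, as noted in the comments above):

    export ENV=staging
    ./scripts/flux-bootstrap.sh
    flux get kustomizations --all-namespaces    # confirm clusters/staging reconciles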


@ -25,8 +25,8 @@ EOF
# --from-literal=username=${TRAEFIK_USERNAME} \
# --from-literal=password=${TRAEFIK_PASSWORD}
kubectl --namespace futureporn delete secret exoscale --ignore-not-found
kubectl --namespace futureporn create secret generic exoscale \
kubectl --namespace cert-manager delete secret exoscale --ignore-not-found
kubectl --namespace cert-manager create secret generic exoscale \
--from-literal=apiKey=${EXOSCALE_API_KEY} \
--from-literal=apiSecret=${EXOSCALE_API_SECRET}
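Since the exoscale secret now lives in the cert-manager namespace, a simple hedged check after running the script:

    kubectl -n cert-manager get secret exoscale    # DATA should be 2 (apiKey, apiSecret)
    kubectl -n futureporn get secret exoscale --ignore-not-found    # old location; expect no output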


@ -1,11 +1,12 @@
# Tiltfile for working with Next and Strapi locally
## cert-manager slows down Tilt updates so I prefer to keep it commented unless I specifically need to test certs
# load('ext://cert_manager', 'deploy_cert_manager')
# deploy_cert_manager(
# load_to_kind=True,
# # registry='localhost:5001'
# )
load('ext://cert_manager', 'deploy_cert_manager')
deploy_cert_manager(
load_to_kind=True,
version='v1.15.1',
)
default_registry('localhost:5001')
load('ext://helm_remote', 'helm_remote')
@ -44,6 +45,21 @@ load('ext://helm_remote', 'helm_remote')
# ]
# )
## this method results in the following error. Build Failed: Internal error occurred: failed calling webhook "webhook.cert-manager.io": failed to call webhook: Post "https://cert-manager-webhook.cert-manager.svc:443/validate?timeout=30s": service "cert-manager-webhook" not found
# helm_remote(
# 'cert-manager',
# repo_url='https://charts.jetstack.io',
# repo_name='cert-manager',
# namespace='cert-manager',
# version='1.15.1',
# set=[
# 'crds.enabled=true'
# ]
# )
helm_remote(
'traefik',
repo_name='traefik',
@ -52,12 +68,9 @@ helm_remote(
version='28.3.0',
set=[
'globalArguments[0]=--global.sendanonymoususage=false',
'globalArguments[1]=--certificatesresolvers.le.acme.email=cj@futureporn.net',
'globalArguments[2]=--certificatesresolvers.le.acme.storage=/data/acme.json',
'globalArguments[3]=--certificatesresolvers.le.acme.tlschallenge=true',
'globalArguments[4]=--certificatesresolvers.le.acme.caServer=https://acme-staging-v02.api.letsencrypt.org/directory',
'service.enabled=false',
'logs.access.enabled=true'
'logs.access.enabled=true',
'logs.general.level=DEBUG'
]
)
@ -402,3 +415,20 @@ k8s_resource(
# 'postgres',
# 'strapi'
# ])
# k8s_resource(
# workload='cert-manager',
# labels='cert-manager'
# )
# k8s_resource(
# workload='cert-manager-webhook',
# labels='cert-manager'
# )
# k8s_resource(
# workload='cert-manager-cainjector',
# labels='cert-manager'
# )
# k8s_resource(
# workload='cert-manager-startupapicheck',
# labels='cert-manager'
# )
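Because the Tiltfile now deploys cert-manager through the extension, a hedged readiness check before relying on certificates in the kind cluster (cmctl is installed by the Makefile deps target; the kubeconfig path matches the Makefile's tilt target):

    KUBECONFIG=~/.kube/kind.yaml cmctl check api --wait=2m    # blocks until the cert-manager webhook answers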