progress

CJ_Clippy 2024-06-28 15:23:04 -08:00
parent ace373283b
commit 14bc633237
20 changed files with 551 additions and 134 deletions

View File

@@ -8,15 +8,19 @@ namespaces:
 secrets:
 	dotenvx run -f .env.$(ENV) -- ./scripts/k8s-secrets.sh
 flux:
 	./scripts/flux-bootstrap.sh
 dev: kind namespaces secrets chisel velero
-prod: export ENV=production namespaces secrets velero chisel flux
-staging: export ENV=staging namespaces secrets velero chisel flux
+prod: echoenv namespaces secrets velero chisel flux
+staging: echoenv namespaces secrets velero flux
+echoenv:
+	echo "[echoenv] Using ${ENV} environment. If this is not what you want, export ENV=development|staging|production"
+scrap: namespaces secrets velero chisel flux
 velero:
 	./scripts/velero-create.sh
@@ -25,6 +29,9 @@ tilt:
 	kind get kubeconfig > ~/.kube/kind.yaml
 	KUBECONFIG=~/.kube/kind.yaml tilt up -f ./t.wip.tiltfile
+exoscale:
+	kubectl apply -f https://raw.githubusercontent.com/exoscale/cert-manager-webhook-exoscale/master/deploy/exoscale-webhook-kustomize/deploy.yaml
 kind:
 	./scripts/kind-with-local-registry.sh
@@ -36,7 +43,7 @@ clean:
 	dotenvx run -f .env.$(ENV) -- node ./packages/infra/vultr-delete-orphaned-resources.js
 deps:
-	echo "Some of the install methods for these dependencies are not cross-platform compatible. Some of the install methods are not well-tested. Expect this to fail. Please consult the Makefile for URLs to project sources."
+	echo "Some of the install methods for these dependencies are not cross-platform compatible. Some of the install methods are not tested. Expect this to fail. Please consult the Makefile for URLs to project sources."
 	sudo pamac install make entr nvm kubectl docker helm expect
 	curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
 	curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
@@ -48,7 +55,9 @@ deps:
 	newgrp docker
 	curl -OL 'https://github.com/vmware-tanzu/velero/releases/download/v1.13.2/velero-v1.13.2-linux-amd64.tar.gz'
 	npm install -g @dotenvx/dotenvx
+	OS=$(go env GOOS); ARCH=$(go env GOARCH); curl -fsSL -o cmctl https://github.com/cert-manager/cmctl/releases/latest/download/cmctl_${OS}_${ARCH}
+	chmod +x cmctl
+	sudo mv cmctl /usr/local/bin
 # A gitea act runner which runs locally
 # https://docs.gitea.com/next/usage/actions/overview
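The new cmctl install can be smoke-tested right after `make deps` (a sketch; assumes kubectl already points at the cluster where cert-manager runs):

    # confirm the binary landed on PATH
    cmctl version --client

    # ask cert-manager whether its API and webhook are ready to accept resources
    cmctl check api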

View File

@@ -21,7 +21,6 @@ spec:
     imageName: gitea.futureporn.net/futureporn/link2cid:latest
   next:
     imageName: gitea.futureporn.net/futureporn/next:latest
-    certIssuer: letsencrypt-staging
     hostname: next.futureporn.svc.cluster.local
     ingressClassName: traefik
   capture:
@@ -33,7 +32,6 @@ spec:
     imageName: gitea.futureporn.net/futureporn/scout-worker:latest
     replicas: 1
     pubsubServerUrl: https://realtime.futureporn.svc.cluster.local/faye
-    certIssuer: letsencrypt-staging
     hostname: next.futureporn.svc.cluster.local
     cdnBucketUrl: https://fp-dev.b-cdn.net
     s3BucketName: fp-dev
@@ -41,7 +39,6 @@ spec:
     imageName: gitea.futureporn.net/futureporn/strapi:latest
     port: 1339
     url: https://strapi.piko.sbtp.xyz
-    certIssuer: letsencrypt-staging
     hostname: strapi.futureporn.svc.cluster.local
     ingressClassName: traefik
   realtime:
@@ -49,3 +46,5 @@ spec:
     adminEmail: cj@futureporn.net
   chisel:
     exitNodeIp: "155.138.254.201"
+  certManager:
+    issuer: letsencrypt-staging
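With the per-service certIssuer keys collapsed into a single certManager.issuer value, a quick render confirms every ingress annotation resolves to the same issuer (a sketch; the chart path ./charts/fp and values file are assumptions):

    # grep the rendered manifests for the issuer annotation
    helm template fp ./charts/fp -f values.yaml | grep 'cert-manager.io/cluster-issuer'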

View File

@@ -33,7 +33,7 @@ spec:
       traefik.ingress.kubernetes.io/router.tls.domains.0.main: podinfo.sbtp.xyz
       traefik.ingress.kubernetes.io/router.entrypoints: http
       traefik.ingress.kubernetes.io/service.nativelb: podinfo
-      cert-manager.io/cluster-issuer: "letsencrypt"
+      cert-manager.io/cluster-issuer: "letsencrypt-staging"
       # ingress.kubernetes.io/ssl-redirect: "true"
     hosts:
       - host: podinfo.sbtp.xyz

View File

@@ -9,9 +9,10 @@ spec:
-      certIssuer: letsencrypt-prod
       hostname: next.futureporn.net
     scout:
-      certIssuer: letsencrypt-prod
       cdnBucketUrl: https://futureporn-b2.b-cdn.net
       s3BucketName: futureporn-b2
     strapi:
       url: https://portal.futureporn.net
       hostname: portal.futureporn.net
+    certManager:
+      issuer: letsencrypt-production

View File

@@ -6,13 +6,13 @@ metadata:
 spec:
   values:
     next:
-      certIssuer: letsencrypt-staging
       hostname: next.sbtp.xyz
     scout:
-      certIssuer: letsencrypt-staging
       cdnBucketUrl: https://fp-dev.b-cdn.net
       s3BucketName: fp-dev
     strapi:
       url: https://strapi.sbtp.xyz
       hostname: strapi.sbtp.xyz
     managedBy: Helm
+    certManager:
+      issuer: letsencrypt-staging

View File

@@ -0,0 +1,127 @@
## roles to deal with the following error
## exoscale.acme.exoscale.com is forbidden: User "system:serviceaccount:cert-manager:cert-manager" cannot create resource "exoscale"
## cannot create resource "exoscale" in API group "acme.exoscale.com" at the cluster scope
# I don't think I need the following commented yaml. I think the problem I was seeing was actually caused by the lack of cert-manager-webhook-exoscale
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: ClusterRole
# metadata:
# name: cert-manager-webhook-exoscale
# namespace: cert-manager
# labels:
# app: webhook
# rules:
# - apiGroups:
# - acme.exoscale.com
# resources:
# - '*'
# verbs:
# - 'create'
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: ClusterRoleBinding
# metadata:
# name: cert-manager-webhook-exoscale
# namespace: cert-manager
# labels:
# app: webhook
# roleRef:
# apiGroup: rbac.authorization.k8s.io
# kind: ClusterRole
# name: cert-manager-webhook-exoscale
# subjects:
# - apiGroup: ""
# kind: ServiceAccount
# name: cert-manager
# namespace: cert-manager
## Role & RoleBinding to give cert-manager access to the exoscale secret
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-webhook-exoscale:secret-reader
namespace: cert-manager
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["exoscale"]
verbs: ["get", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook-exoscale:secret-reader
namespace: cert-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cert-manager-webhook-exoscale:secret-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-exoscale
---
{{ if ne .Values.environment "production" }}
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# You must replace this email address with your own.
# Let's Encrypt will use this to contact you about expiring
# certificates, and issues related to your account.
email: {{ .Values.adminEmail }}
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: letsencrypt-staging
solvers:
- dns01:
webhook:
groupName: acme.exoscale.com
solverName: exoscale
config:
apiKeyRef:
name: exoscale
key: apiKey
apiSecretRef:
name: exoscale
key: apiSecret
{{ end }}
{{ if eq .Values.environment "production" }}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# server: https://acme-staging-v02.api.letsencrypt.org/directory
server: https://acme-v02.api.letsencrypt.org/directory
email: {{ .Values.adminEmail }}
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- dns01:
webhook:
groupName: acme.exoscale.com
solverName: exoscale
config:
apiKeyRef:
name: exoscale
key: apiKey
apiSecretRef:
name: exoscale
key: apiSecret
{{ end }}
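Once applied, an issuer's ACME registration can be verified before any Certificate references it (a sketch):

    # READY should be True once the Let's Encrypt account is registered
    kubectl get clusterissuer letsencrypt-staging
    kubectl describe clusterissuer letsencrypt-staging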

View File

@@ -28,7 +28,7 @@ metadata:
   namespace: futureporn
   annotations:
     external-dns.alpha.kubernetes.io/hostname: "{{ .Values.echo.hostname }}"
-    # chisel-operator.io/exit-node-name: "echo-exit-node"
+    chisel-operator.io/exit-node-name: "echo-exit-node"
 spec:
   selector:
     app: echo
@@ -59,7 +59,7 @@ metadata:
   annotations:
     traefik.ingress.kubernetes.io/router.entrypoints: web
     traefik.ingress.kubernetes.io/router.tls: "false"
-    traefik.ingress.kubernetes.io/router.tls.certresolver: le
+    # cert-manager.io/cluster-issuer: "{{ .Values.certManager.issuer }}"
 spec:
   rules:
     - host: "{{ .Values.echo.hostname }}"
@ -72,17 +72,5 @@ spec:
name: echo name: echo
port: port:
number: 8001 number: 8001
tls:
# --- - secretName: echo-cert
# apiVersion: traefik.io/v1alpha1
# kind: IngressRoute
# metadata:
# name: echo
# namespace: futureporn
# spec:
# routes:
# - match: Host(`echo.sbtp.xyz`)
# kind: Rule
# services:
# - name: echo
# port: 8001

View File

@@ -0,0 +1,321 @@
## copied from https://github.com/exoscale/cert-manager-webhook-exoscale
---
# Source: exoscale-webhook/templates/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: cert-manager-webhook-exoscale
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
---
# Source: exoscale-webhook/templates/rbac.yaml
# Grant cert-manager permission to validate using our apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-webhook-exoscale:domain-solver
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
rules:
- apiGroups:
- acme.exoscale.com
resources:
- '*'
verbs:
- 'create'
---
# Source: exoscale-webhook/templates/rbac.yaml
# apiserver gets the auth-delegator role to delegate auth decisions to
# the core apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-webhook-exoscale:auth-delegator
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-exoscale
namespace: cert-manager
---
# Source: exoscale-webhook/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-webhook-exoscale:domain-solver
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-webhook-exoscale:domain-solver
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager
namespace: cert-manager
---
# Source: exoscale-webhook/templates/rbac.yaml
# Grant the webhook permission to read the secrets containing the credentials
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-webhook-exoscale:secrets-reader
namespace: cert-manager
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
rules:
- apiGroups:
- ''
resources:
- 'secrets'
verbs:
- 'get'
---
# Source: exoscale-webhook/templates/rbac.yaml
# Grant the webhook permission to read the ConfigMap containing the Kubernetes
# apiserver's requestheader-ca-certificate.
# This ConfigMap is automatically created by the Kubernetes apiserver.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook-exoscale:webhook-authentication-reader
namespace: kube-system
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-exoscale
namespace: cert-manager
---
# Source: exoscale-webhook/templates/rbac.yaml
# Grant the webhook permission to read the secrets containing the credentials
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook-exoscale:secrets-reader
namespace: cert-manager
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cert-manager-webhook-exoscale:secrets-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook-exoscale
namespace: cert-manager
---
# Source: exoscale-webhook/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: cert-manager-webhook-exoscale
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
type: ClusterIP
ports:
- port: 443
targetPort: https
protocol: TCP
name: https
selector:
app: exoscale-webhook
release: exoscale-webhook
---
# Source: exoscale-webhook/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cert-manager-webhook-exoscale
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
replicas:
selector:
matchLabels:
app: exoscale-webhook
release: exoscale-webhook
template:
metadata:
labels:
app: exoscale-webhook
release: exoscale-webhook
spec:
serviceAccountName: cert-manager-webhook-exoscale
containers:
- name: exoscale-webhook
image: "exoscale/cert-manager-webhook-exoscale:latest"
imagePullPolicy: IfNotPresent
args:
- --tls-cert-file=/tls/tls.crt
- --tls-private-key-file=/tls/tls.key
env:
- name: GROUP_NAME
value: "acme.exoscale.com"
- name: EXOSCALE_DEBUG
value: ""
- name: EXOSCALE_API_TRACE
value: ""
ports:
- name: https
containerPort: 443
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTPS
path: /healthz
port: https
readinessProbe:
httpGet:
scheme: HTTPS
path: /healthz
port: https
volumeMounts:
- name: certs
mountPath: /tls
readOnly: true
resources:
{}
volumes:
- name: certs
secret:
secretName: cert-manager-webhook-exoscale-webhook-tls
---
# Source: exoscale-webhook/templates/apiservice.yaml
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v1alpha1.acme.exoscale.com
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
annotations:
cert-manager.io/inject-ca-from: "cert-manager/cert-manager-webhook-exoscale-webhook-tls"
spec:
group: acme.exoscale.com
groupPriorityMinimum: 1000
versionPriority: 15
service:
name: cert-manager-webhook-exoscale
namespace: cert-manager
version: v1alpha1
---
# Source: exoscale-webhook/templates/pki.yaml
# Generate a CA Certificate used to sign certificates for the webhook
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: cert-manager-webhook-exoscale-ca
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
secretName: cert-manager-webhook-exoscale-ca
duration: 43800h # 5y
issuerRef:
name: cert-manager-webhook-exoscale-selfsign
commonName: "ca.exoscale-webhook.cert-manager"
isCA: true
---
# Source: exoscale-webhook/templates/pki.yaml
# Finally, generate a serving certificate for the webhook to use
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: cert-manager-webhook-exoscale-webhook-tls
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
secretName: cert-manager-webhook-exoscale-webhook-tls
duration: 8760h # 1y
issuerRef:
name: cert-manager-webhook-exoscale-ca
dnsNames:
- cert-manager-webhook-exoscale
- cert-manager-webhook-exoscale.cert-manager
- cert-manager-webhook-exoscale.cert-manager.svc
---
# Source: exoscale-webhook/templates/pki.yaml
# Create a selfsigned Issuer, in order to create a root CA certificate for
# signing webhook serving certificates
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: cert-manager-webhook-exoscale-selfsign
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
selfSigned: {}
---
# Source: exoscale-webhook/templates/pki.yaml
# Create an Issuer that uses the above generated CA certificate to issue certs
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: cert-manager-webhook-exoscale-ca
namespace: "cert-manager"
labels:
app: exoscale-webhook
chart: exoscale-webhook-0.3.0
release: exoscale-webhook
heritage: Helm
spec:
ca:
secretName: cert-manager-webhook-exoscale-ca
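The webhook only becomes usable once its aggregated API is served; if the ClusterIssuers above report webhook errors, these checks narrow it down (a sketch):

    # AVAILABLE should become True after cainjector injects the serving CA
    kubectl get apiservice v1alpha1.acme.exoscale.com

    # the webhook pod itself, selected by the labels from this manifest
    kubectl -n cert-manager get pods -l app=exoscale-webhook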

View File

@ -1,49 +0,0 @@
{{ if ne .Values.environment "production" }}
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# You must replace this email address with your own.
# Let's Encrypt will use this to contact you about expiring
# certificates, and issues related to your account.
email: {{ .Values.adminEmail }}
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: letsencrypt-staging
solvers:
- dns01:
webhook:
groupName: acme.vultr.com
solverName: vultr
config:
apiKeySecretRef:
key: apiKey
name: vultr
{{ end }}
{{ if eq .Values.environment "production" }}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# server: https://acme-staging-v02.api.letsencrypt.org/directory
server: https://acme-v02.api.letsencrypt.org/directory
email: {{ .Values.adminEmail }}
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- dns01:
webhook:
groupName: acme.vultr.com
solverName: vultr
config:
apiKeySecretRef:
key: apiKey
name: vultr
{{ end }}

View File

@ -51,7 +51,7 @@ metadata:
annotations: annotations:
traefik.ingress.kubernetes.io/router.entrypoints: http traefik.ingress.kubernetes.io/router.entrypoints: http
traefik.ingress.kubernetes.io/router.tls: "true" traefik.ingress.kubernetes.io/router.tls: "true"
traefik.ingress.kubernetes.io/tls.certresolver: le cert-manager.io/cluster-issuer: "{{ .Values.certManager.issuer }}"
kubernetes.io/ingress.class: traefik kubernetes.io/ingress.class: traefik
external-dns.alpha.kubernetes.io/hostname: "{{ .Values.next.hostname }}" external-dns.alpha.kubernetes.io/hostname: "{{ .Values.next.hostname }}"
spec: spec:
@@ -98,8 +98,8 @@ metadata:
   namespace: futureporn
   annotations:
     traefik.ingress.kubernetes.io/router.entrypoints: web
-    traefik.ingress.kubernetes.io/router.tls: "false"
-    traefik.ingress.kubernetes.io/router.tls.certresolver: le
+    traefik.ingress.kubernetes.io/router.tls: "true"
+    cert-manager.io/cluster-issuer: "{{ .Values.certManager.issuer }}"
 spec:
   rules:
     - host: next.fp.sbtp.xyz
@@ -112,3 +112,7 @@ spec:
             name: next
             port:
               number: 3000
+  tls:
+    - hosts:
+        - "{{ .Values.next.hostname }}"
+      secretName: next-cert
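With the tls block in place, cert-manager's ingress-shim should create a Certificate for the next-cert secret; issuance can be followed like this (a sketch; the Certificate name mirroring the secret name is ingress-shim's convention):

    kubectl -n futureporn get certificate
    kubectl -n futureporn describe certificate next-cert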

View File

@ -162,18 +162,14 @@ metadata:
namespace: futureporn namespace: futureporn
annotations: annotations:
kubernetes.io/ingress.class: traefik kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web traefik.ingress.kubernetes.io/router.entrypoints: websecure,web
traefik.ingress.kubernetes.io/router.tls: "false" traefik.ingress.kubernetes.io/router.tls: "false"
traefik.ingress.kubernetes.io/router.tls.certresolver: le cert-manager.io/cluster-issuer: "{{ .Values.certManager.issuer }}"
spec: spec:
ingressClassName: "{{ .Values.strapi.ingressClassName }}" tls:
backend: - hosts:
serviceName: strapi - "{{ .Values.strapi.hostname }}"
servicePort: 1339 secretName: strapi-cert
# tls:
# - secretName: strapi-tls
# hosts:
# - "{{ .Values.strapi.hostname }}"
rules: rules:
- host: "{{ .Values.strapi.hostname }}" - host: "{{ .Values.strapi.hostname }}"
http: http:
@@ -198,18 +194,3 @@ spec:
       port: 9090
   auth: chisel
 {{ end }}
-# annotations:
-# spec:
-#   rules:
-#     - host: echo.sbtp.xyz
-#       http:
-#         paths:
-#           - path: /
-#             pathType: Prefix
-#             backend:
-#               service:
-#                 name: echo
-#                 port:
-#                   number: 8001

View File

@@ -37,4 +37,6 @@ spec:
       port: 9090
   auth: chisel
 {{ end }}

View File

@@ -5,7 +5,6 @@ link2cid:
   imageName: fp/link2cid
 next:
   imageName: fp/next
-  certIssuer: letsencrypt-staging
   hostname: next.fp.sbtp.xyz
   ingressClassName: traefik
 capture:
@@ -17,7 +16,6 @@ scout:
   imageName: fp/scout-worker
   replicas: 1
   pubsubServerUrl: https://realtime.futureporn.svc.cluster.local/faye
-  certIssuer: letsencrypt-staging
   hostname: next.futureporn.svc.cluster.local
   cdnBucketUrl: https://fp-dev.b-cdn.net
   s3BucketName: fp-dev
@@ -25,7 +23,6 @@ strapi:
   imageName: fp/strapi
   port: 1339
   url: https://strapi.fp.sbtp.xyz
-  certIssuer: letsencrypt-staging
   hostname: strapi.fp.sbtp.xyz
   ingressClassName: traefik
 ngrok:
@@ -36,4 +33,6 @@ adminEmail: cj@futureporn.net
 chisel:
   exitNodeIp: "155.138.254.201"
 echo:
   hostname: echo.fp.sbtp.xyz
+certManager:
+  issuer: letsencrypt-staging

View File

@ -13,7 +13,6 @@ scout:
s3BucketName: futureporn s3BucketName: futureporn
next: next:
imageName: gitea.futureporn.net/futureporn/next:latest imageName: gitea.futureporn.net/futureporn/next:latest
certIssuer: letsencrypt-staging
hostname: next.sbtp.xyz hostname: next.sbtp.xyz
capture: capture:
imageName: gitea.futureporn.net/futureporn/capture:latest imageName: gitea.futureporn.net/futureporn/capture:latest
@@ -21,11 +20,12 @@ strapi:
   imageName: sjc.vultrcr.com/fpcontainers/strapi
   port: 1339
   url: https://portal.futureporn.net
-  certIssuer: letsencrypt-prod
   hostname: strapi.sbtp.xyz
   ingressClassName: traefik
 managedBy: Helm
 adminEmail: cj@futureporn.net
 extraArgs:
   - --dns01-recursive-nameservers-only
   - --dns01-recursive-nameservers=8.8.8.8:53,1.1.1.1:53
+certManager:
+  issuer: letsencrypt-production

View File

@@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- github.com/exoscale/cert-manager-webhook-exoscale/deploy/exoscale-webhook-kustomize
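Rendering the kustomization locally catches a bad remote ref before Flux tries to reconcile it (a sketch; run from the directory containing this file):

    # fetches the remote base and prints the manifests it would apply
    kubectl kustomize . | head -n 20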

View File

@@ -1,5 +1,3 @@
-## DEPRECATED. We are using traefik to get certs
 ---
 apiVersion: v1
 kind: Namespace
@@ -35,3 +33,4 @@ spec:
   interval: 12h
   values:
     installCRDs: true

View File

@@ -2,7 +2,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
   - external-dns.yaml
-  # - cert-manager.yaml
+  - cert-manager.yaml
+  - cert-manager-webhook-exoscale.yaml
   - ingress-traefik.yaml
   - grafana-k8s-dashboards.yaml
   - kube-prometheus-stack.yaml

scripts/flux-bootstrap.sh (Normal file → Executable file)
View File

@@ -1,10 +1,9 @@
 #!/bin/bash
-if [ -z "$(ENV)" ]; then \
+if [ -z "${ENV}" ]; then \
 echo "Error: ENV variable is not defined. Please set to one of development|staging|production"; exit 1; \
 fi
-## this way is annoying because deployment asks for git password
 # flux bootstrap git \
 #   --kubeconfig /home/cj/.kube/vke.yaml \
 #   --url=https://gitea.futureporn.net/futureporn/fp.git \
@@ -13,9 +12,9 @@ fi
 #   --token-auth=true \
 #   --path=clusters/production
-## this way is more automatic although it does ask for yes/no confirmation that the ssh key has repo access
+## --silent avoids the [Yes|no] prompt
 flux bootstrap git \
-  --yes \
+  --silent \
   --url="ssh://git@gitea.futureporn.net:2222/futureporn/fp" \
   --branch=main \
   --path="clusters/$ENV" \
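Usage stays the same as the Makefile targets imply: export ENV, run the script, then confirm the controllers came up (a sketch):

    export ENV=staging
    ./scripts/flux-bootstrap.sh

    # verify the flux controllers and CRDs reconciled
    flux check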

View File

@ -25,8 +25,8 @@ EOF
# --from-literal=username=${TRAEFIK_USERNAME} \ # --from-literal=username=${TRAEFIK_USERNAME} \
# --from-literal=password=${TRAEFIK_PASSWORD} # --from-literal=password=${TRAEFIK_PASSWORD}
kubectl --namespace futureporn delete secret exoscale --ignore-not-found kubectl --namespace cert-manager delete secret exoscale --ignore-not-found
kubectl --namespace futureporn create secret generic exoscale \ kubectl --namespace cert-manager create secret generic exoscale \
--from-literal=apiKey=${EXOSCALE_API_KEY} \ --from-literal=apiKey=${EXOSCALE_API_KEY} \
--from-literal=apiSecret=${EXOSCALE_API_SECRET} --from-literal=apiSecret=${EXOSCALE_API_SECRET}
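Since the ClusterIssuers' apiKeyRef/apiSecretRef now resolve in the cert-manager namespace, it's worth confirming the secret landed there after the script runs (a sketch; describe shows key names and sizes without printing the credentials):

    dotenvx run -f .env.staging -- ./scripts/k8s-secrets.sh
    kubectl -n cert-manager describe secret exoscale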

View File

@ -1,11 +1,12 @@
# Tiltfile for working with Next and Strapi locally # Tiltfile for working with Next and Strapi locally
## cert-manager slows down Tilt updates so I prefer to keep it commented unless I specifically need to test certs ## cert-manager slows down Tilt updates so I prefer to keep it commented unless I specifically need to test certs
# load('ext://cert_manager', 'deploy_cert_manager') load('ext://cert_manager', 'deploy_cert_manager')
# deploy_cert_manager( deploy_cert_manager(
# load_to_kind=True, load_to_kind=True,
# # registry='localhost:5001' version='v1.15.1',
# ) )
default_registry('localhost:5001') default_registry('localhost:5001')
load('ext://helm_remote', 'helm_remote') load('ext://helm_remote', 'helm_remote')
@ -44,6 +45,21 @@ load('ext://helm_remote', 'helm_remote')
# ] # ]
# ) # )
## this method results in the following error. Build Failed: Internal error occurred: failed calling webhook "webhook.cert-manager.io": failed to call webhook: Post "https://cert-manager-webhook.cert-manager.svc:443/validate?timeout=30s": service "cert-manager-webhook" not found
# helm_remote(
# 'cert-manager',
# repo_url='https://charts.jetstack.io',
# repo_name='cert-manager',
# namespace='cert-manager',
# version='1.15.1',
# set=[
# 'crds.enabled=true'
# ]
# )
helm_remote( helm_remote(
'traefik', 'traefik',
repo_name='traefik', repo_name='traefik',
@@ -52,12 +68,9 @@ helm_remote(
   version='28.3.0',
   set=[
     'globalArguments[0]=--global.sendanonymoususage=false',
-    'globalArguments[1]=--certificatesresolvers.le.acme.email=cj@futureporn.net',
-    'globalArguments[2]=--certificatesresolvers.le.acme.storage=/data/acme.json',
-    'globalArguments[3]=--certificatesresolvers.le.acme.tlschallenge=true',
-    'globalArguments[4]=--certificatesresolvers.le.acme.caServer=https://acme-staging-v02.api.letsencrypt.org/directory',
     'service.enabled=false',
-    'logs.access.enabled=true'
+    'logs.access.enabled=true',
+    'logs.general.level=DEBUG'
   ]
 )
@@ -402,3 +415,20 @@ k8s_resource(
 #     'postgres',
 #     'strapi'
 #   ])
+# k8s_resource(
+#   workload='cert-manager',
+#   labels='cert-manager'
+# )
+# k8s_resource(
+#   workload='cert-manager-webhook',
+#   labels='cert-manager'
+# )
+# k8s_resource(
+#   workload='cert-manager-cainjector',
+#   labels='cert-manager'
+# )
+# k8s_resource(
+#   workload='cert-manager-startupapicheck',
+#   labels='cert-manager'
+# )
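With deploy_cert_manager pinned to v1.15.1, the running version can be cross-checked inside the kind cluster (a sketch; assumes the extension's default cert-manager namespace and deployment name):

    # the image tag should match the version pinned in the Tiltfile
    kubectl -n cert-manager get deploy cert-manager \
      -o jsonpath='{.spec.template.spec.containers[0].image}'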