multicluster example
ci / build (push) Successful in 14s

This commit is contained in:
CJ_Clippy 2024-04-26 01:28:42 +00:00
parent ac35d9aae3
commit 2f0e37d18b
29 changed files with 477 additions and 36 deletions

View File

@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  name: ipfs-pod
  namespace: default
  labels:
    app.kubernetes.io/name: ipfs
spec:
  containers:
    - name: ipfs
      image: ipfs/kubo
      ports:
        - containerPort: 5001
        - containerPort: 8080
      volumeMounts:
        - name: ipfs-pvc
          mountPath: /data/ipfs
  restartPolicy: OnFailure
  volumes:
    - name: ipfs-pvc
      persistentVolumeClaim:
        claimName: ipfs-pvc

View File

@ -0,0 +1,18 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ipfs-pvc
  namespace: default
  annotations:
    meta.helm.sh/release-name: fp
    meta.helm.sh/release-namespace: default
  labels:
    app.kubernetes.io/managed-by: {{ .Values.managedBy }}
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 40Gi
  storageClassName: {{ .Values.storageClassName }}

View File

@ -0,0 +1,23 @@
apiVersion: v1
kind: Service
metadata:
  name: ipfs-service
  namespace: default
  annotations:
    meta.helm.sh/release-name: fp
    meta.helm.sh/release-namespace: default
  labels:
    app.kubernetes.io/managed-by: {{ .Values.managedBy }}
spec:
  selector:
    app.kubernetes.io/name: ipfs
  ports:
    - name: gateway
      protocol: TCP
      port: 8080
      targetPort: 8080
    - name: api
      protocol: TCP
      port: 5001
      targetPort: 5001

View File

@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- link2cid.yaml

View File

@ -0,0 +1,110 @@
apiVersion: v1
kind: Service
metadata:
  name: link2cid
  namespace: default
spec:
  selector:
    app: link2cid
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 3939
    - name: https
      protocol: TCP
      port: 443
      targetPort: 3939
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: link2cid
  namespace: default
spec:
  selector:
    matchLabels:
      app: link2cid
  template:
    metadata:
      labels:
        app: link2cid
    spec:
      containers:
        - image: {{ .Values.link2cid.containerName }}
          resources:
            requests:
              memory: "64Mi"
              cpu: "250m"
            limits:
              memory: "128Mi"
              cpu: "500m"
          name: link2cid
          ports:
            - containerPort: 3939
          env:
            - name: IPFS_URL
              value: http://ipfs-service:5001
            - name: PORT
              value: '3939'
            - name: API_KEY
              valueFrom:
                secretKeyRef:
                  name: link2cid
                  key: apiKey
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: link2cid
  namespace: default
  annotations:
    meta.helm.sh/release-name: fp
    meta.helm.sh/release-namespace: default
  labels:
    app.kubernetes.io/managed-by: {{ .Values.managedBy }}
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 40Gi
  storageClassName: {{ .Values.storageClassName }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: link2cid-ingress
  namespace: default
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  ingressClassName: nginx
  secretTemplate:
    annotations:
      reflector.v1.k8s.emberstack.com/reflection-allowed: "true"
      reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: ""
  tls:
    - hosts:
        - link2cid.sbtp.xyz
      secretName: link2cid-tls
  rules:
    - host: link2cid.sbtp.xyz
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: link2cid
                port:
                  number: 80
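The Deployment above reads API_KEY from a Secret named link2cid (key apiKey) that is not part of this commit. A minimal sketch of that Secret, with a placeholder value, might look like this:

# Hypothetical example; the real Secret is created out-of-band and the value below is a placeholder.
apiVersion: v1
kind: Secret
metadata:
  name: link2cid
  namespace: default
type: Opaque
stringData:
  apiKey: REPLACE_WITH_LINK2CID_API_KEY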

View File

@ -0,0 +1,12 @@
# storageClassName: csi-hostpath-sc # used by minikube
storageClassName: standard # used by Kind
link2cid:
  containerName: fp/link2cid
next:
  containerName: fp/next
strapi:
  containerName: fp/strapi
  port: 1337
  url: http://localhost:1337
managedBy: Dildo
adminEmail: cj@futureporn.net

View File

@ -0,0 +1,14 @@
storageClassName: vultr-block-storage-hdd
link2cid:
  containerName: gitea.futureporn.net/futureporn/link2cid:latest
next:
  containerName: sjc.vultrcr.com/fpcontainers/next
strapi:
  containerName: sjc.vultrcr.com/fpcontainers/strapi
  port: 1337
  url: https://portal.futureporn.net
managedBy: Helm
adminEmail: cj@futureporn.net
extraArgs:
  - --dns01-recursive-nameservers-only
  - --dns01-recursive-nameservers=8.8.8.8:53,1.1.1.1:53

View File

@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: windmill-ingress
  namespace: default
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-staging"
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
    # If you encounter a redirect loop or get a 307 response code,
    # force the nginx ingress to connect to the backend using HTTPS.
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  ingressClassName: nginx
  rules:
    - host: windmill2.sbtp.xyz
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: windmill-app
                port:
                  name: https
  tls:
    - hosts:
        - windmill2.sbtp.xyz
      secretName: windmill-tls

View File

@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: windmill
resources:
- windmill.yaml

View File

@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../base/futureporn
patches:
  - path: futureporn-values.yaml
    target:
      kind: HelmRelease

View File

@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../base/futureporn

View File

@ -1,16 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: fp
  namespace: default
spec:
  dependsOn: cert-manager
  interval: 1m
  url: https://gitea.futureporn.net/futureporn/fp.git
  ref:
    branch: main
  ignore: |
    # exclude all
    /*
    # include charts directory
    !/charts/

View File

@ -1,12 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: podinfo
  namespace: flux-system
spec:
  dependsOn: cert-manager
  interval: 10m0s
  ref:
    branch: master
  url: https://github.com/stefanprodan/podinfo

View File

@ -2,16 +2,16 @@
 apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization
 metadata:
-  name: podinfo
+  name: apps
   namespace: flux-system
 spec:
-  interval: 30m0s
-  path: ./kustomize
-  prune: true
-  retryInterval: 2m0s
+  interval: 10m0s
+  dependsOn:
+    - name: infra-configs
   sourceRef:
     kind: GitRepository
-    name: podinfo
-  targetNamespace: default
-  timeout: 3m0s
+    name: flux-system
+  path: ./apps/production
+  prune: true
   wait: true
+  timeout: 5m0s

View File

@ -0,0 +1,33 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: infra-controllers
  namespace: flux-system
spec:
  interval: 1h
  retryInterval: 1m
  timeout: 5m
  sourceRef:
    kind: GitRepository
    name: flux-system
  path: ./infrastructure/controllers
  prune: true
  wait: true
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: infra-configs
  namespace: flux-system
spec:
  dependsOn:
    - name: infra-controllers
  interval: 1h
  retryInterval: 1m
  timeout: 5m
  sourceRef:
    kind: GitRepository
    name: flux-system
  path: ./infrastructure/configs
  prune: true
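Both Kustomizations point at a GitRepository named flux-system in the flux-system namespace. That object is generated by flux bootstrap (in gotk-sync.yaml) and is not part of this commit; a minimal sketch of what it could look like, with the URL and interval as placeholders:

# Hypothetical example; flux bootstrap generates the real object and its auth secret.
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 1m
  url: https://gitea.futureporn.net/futureporn/fp.git
  ref:
    branch: main
  secretRef:
    name: flux-system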

View File

@ -0,0 +1,45 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    # server: https://acme-staging-v02.api.letsencrypt.org/directory
    server: https://acme-v02.api.letsencrypt.org/directory
    email: cj@futureporn.net
    privateKeySecretRef:
      name: letsencrypt-prod
    solvers:
      - dns01:
          webhook:
            groupName: acme.vultr.com
            solverName: vultr
            config:
              apiKeySecretRef:
                key: apiKey
                name: vultr
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # You must replace this email address with your own.
    # Let's Encrypt will use this to contact you about expiring
    # certificates, and issues related to your account.
    email: cj@futureporn.net
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      # Secret resource that will be used to store the account's private key.
      name: letsencrypt-staging
    solvers:
      - dns01:
          webhook:
            groupName: acme.vultr.com
            solverName: vultr
            config:
              apiKeySecretRef:
                key: apiKey
                name: vultr-credentials
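Both solvers resolve a Vultr API key from a Secret (vultr for production, vultr-credentials for staging); the vultr webhook looks it up in the cert-manager namespace, which is why the Role and RoleBinding at the end of this commit grant the webhook ServiceAccount read access to secrets there. The external-dns Deployment later in this commit reads a Secret with the same name and key from its own namespace. None of these Secrets are included in the commit; a minimal sketch, with a placeholder value, might be:

# Hypothetical example; create the real Secrets out-of-band (e.g. kubectl create secret) with the actual API key.
apiVersion: v1
kind: Secret
metadata:
  name: vultr
  namespace: cert-manager
type: Opaque
stringData:
  apiKey: REPLACE_WITH_VULTR_API_KEY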

View File

@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cluster-issuers.yaml

View File

@ -0,0 +1,69 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: external-dns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: external-dns
rules:
  - apiGroups: [""]
    resources: ["services", "endpoints", "pods"]
    verbs: ["get", "watch", "list"]
  - apiGroups: ["extensions", "networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get", "watch", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: external-dns-viewer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-dns
subjects:
  - kind: ServiceAccount
    name: external-dns
    namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: external-dns
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: external-dns
  template:
    metadata:
      labels:
        app: external-dns
    spec:
      serviceAccountName: external-dns
      containers:
        - name: external-dns
          resources:
            requests:
              memory: "64Mi"
              cpu: "250m"
            limits:
              memory: "128Mi"
              cpu: "500m"
          image: registry.k8s.io/external-dns/external-dns:v0.14.1
          args:
            - --source=ingress
            - --domain-filter=sbtp.xyz
            - --provider=vultr
          env:
            - name: VULTR_API_KEY
              valueFrom:
                secretKeyRef:
                  name: vultr
                  key: apiKey

View File

@ -0,0 +1,39 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    toolkit.fluxcd.io/tenant: sre-team
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
spec:
  interval: 24h
  url: https://kubernetes.github.io/ingress-nginx
---
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
spec:
  interval: 30m
  chart:
    spec:
      chart: ingress-nginx
      version: "*"
      sourceRef:
        kind: HelmRepository
        name: ingress-nginx
        namespace: ingress-nginx
      interval: 12h
  values:
    controller:
      service:
        type: "NodePort"
      admissionWebhooks:
        enabled: false

View File

@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cert-manager.yaml
- ingress-nginx.yaml

View File

@ -0,0 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cert-manager-webhook-vultr-secret-reader
  namespace: cert-manager
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cert-manager-webhook-vultr-secret-reader-binding
  namespace: cert-manager
subjects:
  - kind: ServiceAccount
    name: cert-manager-webhook-vultr
    namespace: cert-manager
roleRef:
  kind: Role
  name: cert-manager-webhook-vultr-secret-reader
  apiGroup: rbac.authorization.k8s.io