try kamal

This commit is contained in:
CJ_Clippy 2025-01-10 19:10:04 -08:00
parent 8b8de3b072
commit 5f6a59d6f2
213 changed files with 36657 additions and 7553 deletions

View File

@ -1 +1 @@
{"last_found_secrets": [{"match": "7630852e9a6a0aecb849c91d14d426ca88187886fdf466189d67145856bdac3e", "name": "Generic Password - charts/postgresql/postgresql/templates/secrets.yaml"}]}
{"last_found_secrets": [{"match": "6e0d657eb1f0fbc40cf0b8f3c3873ef627cc9cb7c4108d1c07d979c04bc8a4bb", "name": "Generic Password - commit://staged/services/bright/config/test.exs"}]}

3
.gitignore vendored
View File

@ -1,3 +1,6 @@
.kamal/secrets*
.venv/
**/.env*

3
.gitmodules vendored Normal file
View File

@ -0,0 +1,3 @@
[submodule "contrib/superstreamer"]
path = contrib/superstreamer
url = git@github.com:superstreamerapp/superstreamer.git

View File

@ -0,0 +1,3 @@
#!/bin/sh
echo "Docker set up on $KAMAL_HOSTS..."

14
.kamal/hooks/post-deploy.sample Executable file
View File

@ -0,0 +1,14 @@
#!/bin/sh
# A sample post-deploy hook
#
# These environment variables are available:
# KAMAL_RECORDED_AT
# KAMAL_PERFORMER
# KAMAL_VERSION
# KAMAL_HOSTS
# KAMAL_ROLE (if set)
# KAMAL_DESTINATION (if set)
# KAMAL_RUNTIME
echo "$KAMAL_PERFORMER deployed $KAMAL_VERSION to $KAMAL_DESTINATION in $KAMAL_RUNTIME seconds"

View File

@ -0,0 +1,3 @@
#!/bin/sh
echo "Rebooted kamal-proxy on $KAMAL_HOSTS"

51
.kamal/hooks/pre-build.sample Executable file
View File

@ -0,0 +1,51 @@
#!/bin/sh
# A sample pre-build hook
#
# Checks:
# 1. We have a clean checkout
# 2. A remote is configured
# 3. The branch has been pushed to the remote
# 4. The version we are deploying matches the remote
#
# These environment variables are available:
# KAMAL_RECORDED_AT
# KAMAL_PERFORMER
# KAMAL_VERSION
# KAMAL_HOSTS
# KAMAL_ROLE (if set)
# KAMAL_DESTINATION (if set)
if [ -n "$(git status --porcelain)" ]; then
echo "Git checkout is not clean, aborting..." >&2
git status --porcelain >&2
exit 1
fi
first_remote=$(git remote)
if [ -z "$first_remote" ]; then
echo "No git remote set, aborting..." >&2
exit 1
fi
current_branch=$(git branch --show-current)
if [ -z "$current_branch" ]; then
echo "Not on a git branch, aborting..." >&2
exit 1
fi
remote_head=$(git ls-remote $first_remote --tags $current_branch | cut -f1)
if [ -z "$remote_head" ]; then
echo "Branch not pushed to remote, aborting..." >&2
exit 1
fi
if [ "$KAMAL_VERSION" != "$remote_head" ]; then
echo "Version ($KAMAL_VERSION) does not match remote HEAD ($remote_head), aborting..." >&2
exit 1
fi
exit 0

47
.kamal/hooks/pre-connect.sample Executable file
View File

@ -0,0 +1,47 @@
#!/usr/bin/env ruby
# A sample pre-connect check
#
# Warms DNS before connecting to hosts in parallel
#
# These environment variables are available:
# KAMAL_RECORDED_AT
# KAMAL_PERFORMER
# KAMAL_VERSION
# KAMAL_HOSTS
# KAMAL_ROLE (if set)
# KAMAL_DESTINATION (if set)
# KAMAL_RUNTIME
hosts = ENV["KAMAL_HOSTS"].split(",")
results = nil
max = 3
elapsed = Benchmark.realtime do
results = hosts.map do |host|
Thread.new do
tries = 1
begin
Socket.getaddrinfo(host, 0, Socket::AF_UNSPEC, Socket::SOCK_STREAM, nil, Socket::AI_CANONNAME)
rescue SocketError
if tries < max
puts "Retrying DNS warmup: #{host}"
tries += 1
sleep rand
retry
else
puts "DNS warmup failed: #{host}"
host
end
end
tries
end
end.map(&:value)
end
retries = results.sum - hosts.size
nopes = results.count { |r| r == max }
puts "Prewarmed %d DNS lookups in %.2f sec: %d retries, %d failures" % [ hosts.size, elapsed, retries, nopes ]

109
.kamal/hooks/pre-deploy.sample Executable file
View File

@ -0,0 +1,109 @@
#!/usr/bin/env ruby
# A sample pre-deploy hook
#
# Checks the Github status of the build, waiting for a pending build to complete for up to 720 seconds.
#
# Fails unless the combined status is "success"
#
# These environment variables are available:
# KAMAL_RECORDED_AT
# KAMAL_PERFORMER
# KAMAL_VERSION
# KAMAL_HOSTS
# KAMAL_COMMAND
# KAMAL_SUBCOMMAND
# KAMAL_ROLE (if set)
# KAMAL_DESTINATION (if set)
# Only check the build status for production deployments
if ENV["KAMAL_COMMAND"] == "rollback" || ENV["KAMAL_DESTINATION"] != "production"
exit 0
end
require "bundler/inline"
# true = install gems so this is fast on repeat invocations
gemfile(true, quiet: true) do
source "https://rubygems.org"
gem "octokit"
gem "faraday-retry"
end
MAX_ATTEMPTS = 72
ATTEMPTS_GAP = 10
def exit_with_error(message)
$stderr.puts message
exit 1
end
class GithubStatusChecks
attr_reader :remote_url, :git_sha, :github_client, :combined_status
def initialize
@remote_url = `git config --get remote.origin.url`.strip.delete_prefix("https://github.com/")
@git_sha = `git rev-parse HEAD`.strip
@github_client = Octokit::Client.new(access_token: ENV["GITHUB_TOKEN"])
refresh!
end
def refresh!
@combined_status = github_client.combined_status(remote_url, git_sha)
end
def state
combined_status[:state]
end
def first_status_url
first_status = combined_status[:statuses].find { |status| status[:state] == state }
first_status && first_status[:target_url]
end
def complete_count
combined_status[:statuses].count { |status| status[:state] != "pending"}
end
def total_count
combined_status[:statuses].count
end
def current_status
if total_count > 0
"Completed #{complete_count}/#{total_count} checks, see #{first_status_url} ..."
else
"Build not started..."
end
end
end
$stdout.sync = true
puts "Checking build status..."
attempts = 0
checks = GithubStatusChecks.new
begin
loop do
case checks.state
when "success"
puts "Checks passed, see #{checks.first_status_url}"
exit 0
when "failure"
exit_with_error "Checks failed, see #{checks.first_status_url}"
when "pending"
attempts += 1
end
exit_with_error "Checks are still pending, gave up after #{MAX_ATTEMPTS * ATTEMPTS_GAP} seconds" if attempts == MAX_ATTEMPTS
puts checks.current_status
sleep(ATTEMPTS_GAP)
checks.refresh!
end
rescue Octokit::NotFound
exit_with_error "Build status could not be found"
end

View File

@ -0,0 +1,3 @@
#!/bin/sh
echo "Rebooting kamal-proxy on $KAMAL_HOSTS..."

View File

@ -1,16 +1,16 @@
git monorepo.
devbox for shareable development environment tooling
git monorepo for housing separate node packages within a single repository
TypeScript
pnpm for workspaces.
pnpm for package management and workspaces (separate node packages.)
Kubernetes for Development using Tiltfile
Kubernetes for Production, deployed using FluxCD
Tested on VKE v1.30.0+1 (PVCs on other versions may not be fulfilled)
devbox for shareable development environment
Kubernetes deployed to Hetzner using https://github.com/kube-hetzner/terraform-hcloud-kube-hetzner
ggshield for preventing git commits containing secrets

View File

@ -4,7 +4,7 @@
Source Code for https://futureporn.net
See ./ARCHITECTURE.md for overview
See ./ARCHITECTURE.md for an overview of the infrastructure components.
## Getting Started
@ -27,7 +27,6 @@ The main gist is as follows
Tilt will manage the KIND cluster, downloading necessary docker containers and building the containers listed in the fp helm chart at ./Charts/fp. Making changes to these charts or the application code will update or re-build the images as necessary.
## Metrics Notes
Keeping track of metrics we want to scrape using Prometheus

414
Tiltfile
View File

@ -49,35 +49,6 @@ dotenv(fn='.env.development')
## @see https://github.com/fluxcd/helm-controller/blob/c8ae4b6ad225d37b19bacb634db784d6096908ac/api/v2beta2/reference_types.go#L53
# helm_remote(
# 'velero',
# repo_name='velero',
# repo_url='https://vmware-tanzu.github.io/helm-charts',
# namespace='futureporn',
# version='6.6.0',
# set=[
# 'configuration.backupStorageLocation[0].name=dev',
# 'configuration.backupStorageLocation[0].provider=aws',
# 'configuration.backupStorageLocation[0].bucket=futureporn-db-backup-dev',
# 'configuration.backupStorageLocation[0].config.region=us-west-000',
# 'configuration.backupStorageLocation[0].config.s3ForcePathStyle=true',
# 'configuration.backupStorageLocation[0].config.s3Url=https://s3.us-west-000.backblazeb2.com',
# 'credentials.secretContents=cloud\n[default]\naws_access_key_id=AAAA\naws_secret_access_key=BBBB',
# 'snapshotsEnabled=false',
# # --set configuration.backupStorageLocation[0].name=<BACKUP STORAGE LOCATION NAME> \
# # --set configuration.backupStorageLocation[0].provider=<PROVIDER NAME> \
# # --set configuration.backupStorageLocation[0].bucket=<BUCKET NAME> \
# # --set configuration.backupStorageLocation[0].config.region=<REGION> \
# # --set configuration.volumeSnapshotLocation[0].name=<VOLUME SNAPSHOT LOCATION NAME> \
# # --set configuration.volumeSnapshotLocation[0].provider=<PROVIDER NAME> \
# # --set configuration.volumeSnapshotLocation[0].config.region=<REGION> \
# # --set initContainers[0].name=velero-plugin-for-<PROVIDER NAME> \
# # --set initContainers[0].image=velero/velero-plugin-for-<PROVIDER NAME>:<PROVIDER PLUGIN TAG> \
# # --set initContainers[0].volumeMounts[0].mountPath=/target \
# # --set initContainers[0].volumeMounts[0].name=plugins
# ]
# )
# helm_remote(
@ -116,13 +87,7 @@ k8s_yaml(helm(
'./charts/traefik/values-overrides.yaml'
]
))
k8s_yaml(helm(
'./charts/keycloak/keycloak',
namespace='futureporn',
values=[
'./charts/keycloak/values-overrides.yaml'
]
))
k8s_yaml(helm(
'./charts/fp',
values=['./charts/fp/values.yaml'],
@ -142,13 +107,13 @@ k8s_yaml(helm(
))
k8s_yaml(helm(
'./charts/velero/velero',
namespace='velero',
values=[
'./charts/velero/values.yaml'
]
))
# k8s_yaml(helm(
# './charts/velero/velero',
# namespace='velero',
# values=[
# './charts/velero/values.yaml'
# ]
# ))
@ -188,13 +153,17 @@ k8s_yaml(helm(
# ## before you think of switching to valkey, dragonfly, or one of the other redis alternatives, STOP. Uppy is picky.
# ## I tested dragonfly, valkey, and KeyDB. Uppy's ioredis client was unable to connect. "ECONNREFUSED" ...
# ## Uppy was only happy connecting to official redis.
# k8s_yaml(helm(
# './charts/redis/redis',
# namespace='futureporn',
# values=[
# './charts/redis/values-overrides.yaml'
# ]
# ))
k8s_yaml(helm(
'./charts/redis/redis',
namespace='futureporn',
values=[
'./charts/redis/values-overrides.yaml'
]
))
k8s_resource(
workload='redis-master',
labels=['database']
)
k8s_yaml(helm(
'./charts/cert-manager/cert-manager',
@ -223,46 +192,46 @@ k8s_resource(
# docker_build('fp/link2cid', './packages/link2cid')
docker_build(
'fp/bot',
'.',
only=[
'./.npmrc',
'./package.json',
'./pnpm-lock.yaml',
'./pnpm-workspace.yaml',
'./services/bot',
'./packages/types',
'./packages/utils',
'./packages/fetchers',
],
dockerfile='./dockerfiles/bot.dockerfile',
target='dev',
live_update=[
sync('./services/bot', '/app/services/bot')
]
)
# docker_build(
# 'fp/bot',
# '.',
# only=[
# './.npmrc',
# './package.json',
# './pnpm-lock.yaml',
# './pnpm-workspace.yaml',
# './services/bot',
# './packages/types',
# './packages/utils',
# './packages/fetchers',
# ],
# dockerfile='./dockerfiles/bot.dockerfile',
# target='dev',
# live_update=[
# sync('./services/bot', '/app/services/bot')
# ]
# )
docker_build(
'fp/scout',
'.',
only=[
'./.npmrc',
'./package.json',
'./pnpm-lock.yaml',
'./pnpm-workspace.yaml',
'./packages/types',
'./packages/utils',
'./packages/fetchers',
'./services/scout',
],
dockerfile='./dockerfiles/scout.dockerfile',
target='dev',
# target='prod',
live_update=[
sync('./services/scout', '/app/services/scout')
]
)
# docker_build(
# 'fp/scout',
# '.',
# only=[
# './.npmrc',
# './package.json',
# './pnpm-lock.yaml',
# './pnpm-workspace.yaml',
# './packages/types',
# './packages/utils',
# './packages/fetchers',
# './services/scout',
# ],
# dockerfile='./dockerfiles/scout.dockerfile',
# target='dev',
# # target='prod',
# live_update=[
# sync('./services/scout', '/app/services/scout')
# ]
# )
@ -271,12 +240,6 @@ docker_build(
load('ext://uibutton', 'cmd_button')
cmd_button('keycloak:seed',
argv=['./scripts/keycloak-seed.sh'],
resource='keycloak',
icon_name='start',
text='create keycloak database',
)
cmd_button('postgres:restore',
argv=['./scripts/postgres-restore.sh'],
@ -290,18 +253,30 @@ cmd_button('postgres:drop',
icon_name='delete',
text='DROP all databases'
)
cmd_button('migrations-schema:refresh',
argv=['echo', '@todo please restart postgrest container manually.'],
resource='migrations-schema',
icon_name='refresh',
text='Refresh schema cache'
cmd_button('postgres:create:bright',
argv=['sh', './scripts/postgres-create-bright.sh'],
resource='bright',
icon_name='star',
text='Create bright db'
)
cmd_button('migrations-data:refresh',
argv=['echo', '@todo please restart postgrest container manually.'],
resource='migrations-data',
icon_name='refresh',
text='Refresh schema cache'
cmd_button('postgres:create:superstreamer',
argv=['sh', './scripts/postgres-create-superstreamer.sh'],
resource='superstreamer-api',
icon_name='star',
text='Create superstreamer db'
)
# cmd_button('migrations-schema:refresh',
# argv=['echo', '@todo please restart postgrest container manually.'],
# resource='migrations-schema',
# icon_name='refresh',
# text='Refresh schema cache'
# )
# cmd_button('migrations-data:refresh',
# argv=['echo', '@todo please restart postgrest container manually.'],
# resource='migrations-data',
# icon_name='refresh',
# text='Refresh schema cache'
# )
## @todo let's make this get a random room from scout then use the random room to record via POST /recordings
# cmd_button('capture-worker:create',
# argv=['./scripts/capture-integration.sh'],
@ -314,8 +289,28 @@ cmd_button('migrations-data:refresh',
# labels=['backend'],
# resource_deps=['postgrest', 'postgresql-primary'],
# )
k8s_resource(
workload='superstreamer-app',
labels=['app'],
resource_deps=['postgresql-primary', 'redis-master'],
port_forwards=['52002']
)
k8s_resource(
workload='superstreamer-api',
labels=['app'],
resource_deps=['postgresql-primary', 'redis-master'],
port_forwards=['52001']
)
k8s_resource(
workload='superstreamer-stitcher',
labels=['app'],
resource_deps=['postgresql-primary', 'redis-master'],
)
k8s_resource(
workload='superstreamer-artisan',
labels=['app'],
resource_deps=['postgresql-primary', 'redis-master'],
)
cmd_button('pgadmin4:restore',
@ -334,48 +329,77 @@ cmd_button('build:test',
## we ignore unused image warnings because we do actually use this image.
## instead of being invoked by helm, we start a container using this image manually via Tilt UI
# update_settings(suppress_unused_image_warnings=["fp/migrations"])
docker_build(
'fp/migrations-schema',
'.',
dockerfile='dockerfiles/migrations-schema.dockerfile',
target='migrations-schema',
pull=False,
only=[
'./.npmrc',
'./package.json',
'./pnpm-lock.yaml',
'./pnpm-workspace.yaml',
'./services/migrations-schema'
],
)
docker_build(
'fp/migrations-data',
'.',
dockerfile='dockerfiles/migrations-data.dockerfile',
target='migrations-data',
pull=False,
only=[
'./.npmrc',
'./package.json',
'./pnpm-lock.yaml',
'./pnpm-workspace.yaml',
'./services/migrations-data'
],
)
# docker_build(
# 'fp/migrations-schema',
# '.',
# dockerfile='dockerfiles/migrations-schema.dockerfile',
# target='migrations-schema',
# pull=False,
# only=[
# './.npmrc',
# './package.json',
# './pnpm-lock.yaml',
# './pnpm-workspace.yaml',
# './services/migrations-schema'
# ],
# )
# docker_build(
# 'fp/migrations-data',
# '.',
# dockerfile='dockerfiles/migrations-data.dockerfile',
# target='migrations-data',
# pull=False,
# only=[
# './.npmrc',
# './package.json',
# './pnpm-lock.yaml',
# './pnpm-workspace.yaml',
# './services/migrations-data'
# ],
# )
## Uncomment the following for fp/next in dev mode
## this is useful for changing the UI and seeing results
docker_build(
'fp/next',
'.',
dockerfile='dockerfiles/next.dockerfile',
target='dev',
live_update=[
sync('./services/next', '/app/services/next')
],
pull=False,
)
# docker_build(
# 'fp/next',
# '.',
# dockerfile='dockerfiles/next.dockerfile',
# target='dev',
# live_update=[
# sync('./services/next', '/app/services/next')
# ],
# pull=False,
# )
# docker_build(
# 'fp/superstreamer-artisan',
# './contrib/superstreamer/packages/artisan',
# dockerfile='contrib/superstreamer/packages/artisan/Dockerfile',
# )
# docker_build(
# 'fp/superstreamer-api',
# './contrib/superstreamer/packages/api',
# dockerfile='contrib/superstreamer/packages/api/Dockerfile',
# )
# docker_build(
# 'fp/superstreamer-app',
# './contrib/superstreamer/packages/app',
# dockerfile='contrib/superstreamer/packages/app/Dockerfile',
# )
# docker_build(
# 'fp/superstreamer-stitcher',
# './contrib/superstreamer/packages/stitcher',
# dockerfile='contrib/superstreamer/packages/stitcher/Dockerfile',
# )
docker_build(
'fp/bright',
'.',
dockerfile='dockerfiles/bright.dockerfile',
live_update=[
sync('./services/bright', '/app')
],
target='dev'
)
@ -449,21 +473,29 @@ docker_build(
# resource_deps=['redis-master'],
# labels=['backend'],
# )
# k8s_resource(
# workload='next',
# links=[
# link('https://next.fp.sbtp.xyz')
# ],
# resource_deps=['postgrest', 'postgresql-primary'],
# labels=['frontend'],
# port_forwards=['3000'],
# )
k8s_resource(
workload='next',
workload='bright',
links=[
link('https://next.fp.sbtp.xyz')
link('https://bright.fp.sbtp.xyz')
],
resource_deps=['postgrest', 'postgresql-primary'],
labels=['frontend'],
port_forwards=['3000'],
resource_deps=['postgresql-primary'],
labels=['app'],
port_forwards=['4000'],
)
# whoami is for testing routing
k8s_resource(
workload='whoami',
labels=['frontend'],
labels=['app'],
links=[
link('https://whoami.fp.sbtp.xyz/')
]
@ -491,15 +523,6 @@ k8s_resource(
# )
k8s_resource(
workload='keycloak',
links=[
link('https://keycloak.fp.sbtp.xyz'),
],
port_forwards=['8080'],
labels=['backend'],
)
# k8s_resource(
@ -545,25 +568,22 @@ k8s_resource(
)
k8s_resource(
workload='factory',
labels=['backend'],
)
docker_build(
'fp/factory',
'.',
dockerfile='./dockerfiles/factory.dockerfile',
target='dev',
live_update=[
sync('./services/factory', '/app/services/factory')
],
pull=False,
)
# k8s_resource(
# workload='redis-master',
# labels=['cache']
# workload='factory',
# labels=['backend'],
# )
# docker_build(
# 'fp/factory',
# '.',
# dockerfile='./dockerfiles/factory.dockerfile',
# target='dev',
# live_update=[
# sync('./services/factory', '/app/services/factory')
# ],
# pull=False,
# )
# k8s_resource(
# workload='bot',
# labels=['backend'],
@ -574,15 +594,15 @@ docker_build(
# workload='chihaya',
# labels=['backend']
# )
k8s_resource(
workload='postgrest',
# port_forwards=['9000'],
labels=['database'],
links=[
link('https://postgrest.fp.sbtp.xyz'),
],
resource_deps=['postgresql-primary'],
)
# k8s_resource(
# workload='postgrest',
# # port_forwards=['9000'],
# labels=['database'],
# links=[
# link('https://postgrest.fp.sbtp.xyz'),
# ],
# resource_deps=['postgresql-primary'],
# )
k8s_resource(
workload='traefik',
links=[
@ -596,16 +616,16 @@ k8s_resource(
port_forwards=['5050:80'],
labels=['database'],
)
k8s_resource(
workload='migrations-schema',
labels=['database'],
resource_deps=['postgresql-primary'],
)
k8s_resource(
workload='migrations-data',
labels=['database'],
resource_deps=['postgresql-primary'],
)
# k8s_resource(
# workload='migrations-schema',
# labels=['database'],
# resource_deps=['postgresql-primary'],
# )
# k8s_resource(
# workload='migrations-data',
# labels=['database'],
# resource_deps=['postgresql-primary'],
# )
k8s_resource(
workload='cert-manager',

View File

@ -23,11 +23,6 @@ We override default values in the parent folder.
helm repo add jetstack https://charts.jetstack.io --force-update
helm pull jetstack/cert-manager --untar --destination ./charts/cert-manager
### valkey
helm repo add bitnami https://charts.bitnami.com/bitnami
helm pull bitnami/valkey --untar --destination ./charts/valkey
### redis
helm repo add bitnami https://charts.bitnami.com/bitnami
@ -37,11 +32,6 @@ We override default values in the parent folder.
helm pull oci://ghcr.io/fyralabs/chisel-operator/chisel-operator --version 0.1.0 --untar --destination ./charts/chisel-operator
### ngrok
helm repo add ngrok https://ngrok.github.io/kubernetes-ingress-controller
helm pull ngrok/kubernetes-ingress-controller --version 0.14.0 --untar --destination ./charts/kubernetes-ingress-controller
### traefik
helm repo add traefik https://traefik.github.io/charts
@ -57,20 +47,3 @@ We override default values in the parent folder.
helm repo add external-secrets https://charts.external-secrets.io
helm pull external-secrets/external-secrets --version 0.10.2 --untar --destination ./charts/external-secrets
### drupal
helm pull oci://registry-1.docker.io/bitnamicharts/drupal --version 20.0.10 --untar --destination ./charts/drupal
### mariadb
helm repo add bitnami https://charts.bitnami.com/bitnami --force-update
helm pull bitnami/mariadb --untar --destination ./charts/mariadb
### phpmyadmin
helm pull bitnami/phpmyadmin --version 17.0.7 --untar --destination ./charts/phpmyadmin
### keycloak
helm pull bitnami/keycloak --version 24.2.2 --untar --destination ./charts/keycloak

View File

@ -1,10 +1,11 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: bot
namespace: futureporn
labels:
app: bot
app.kubernetes.io/name: bot
spec:
replicas: {{ .Values.bot.replicas }}
selector:
@ -18,30 +19,51 @@ spec:
containers:
- name: bot
image: "{{ .Values.bot.imageName }}"
imagePullPolicy: Always
ports:
- containerPort: 8080
env:
- name: DISCORD_APPLICATION_ID
- name: SCOUT_URL
value: "{{ .Values.scout.url }}"
- name: POSTGREST_URL
value: "{{ .Values.postgrest.url }}"
- name: NODE_ENV
value: production
- name: AUTOMATION_USER_JWT
valueFrom:
secretKeyRef:
name: discord
key: applicationId
name: bot
key: automationUserJwt
- name: DISCORD_TOKEN
valueFrom:
secretKeyRef:
name: discord
key: token
name: bot
key: discordToken
- name: DISCORD_APPLICATION_ID
valueFrom:
secretKeyRef:
name: bot
key: discordApplicationId
- name: DISCORD_CHANNEL_ID
value: "{{ .Values.bot.discordChannelId }}"
valueFrom:
secretKeyRef:
name: bot
key: discordChannelId
- name: DISCORD_GUILD_ID
value: "{{ .Values.bot.discordGuildId }}"
valueFrom:
secretKeyRef:
name: bot
key: discordGuildId
- name: WORKER_CONNECTION_STRING
valueFrom:
secretKeyRef:
name: bot
key: workerConnectionString
- name: HTTP_PROXY
valueFrom:
secretKeyRef:
name: capture
key: httpProxy
resources:
limits:
cpu: "500m"
memory: "512Mi"
requests:
cpu: "250m"
memory: "256Mi"
cpu: 150m
memory: 512Mi
restartPolicy: Always

View File

@ -42,19 +42,6 @@ spec:
secretKeyRef:
name: patreon
key: clientSecret
- name: KEYCLOAK_CLIENT_ID
value: futureporn
- name: KEYCLOAK_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: keycloak
key: clientSecret
- name: KEYCLOAK_ISSUER
value: {{ .Values.keycloak.issuer | quote }}
- name: KEYCLOAK_URL
value: {{ printf "https://%s" .Values.keycloak.hostname | quote }}
- name: KEYCLOAK_LOCAL_URL
value: {{ .Values.keycloak.localUrl | quote }}
ports:
- name: web
containerPort: 3000

View File

@ -1,69 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: bot
namespace: futureporn
labels:
app.kubernetes.io/name: bot
spec:
replicas: {{ .Values.bot.replicas }}
selector:
matchLabels:
app: bot
template:
metadata:
labels:
app: bot
spec:
containers:
- name: bot
image: "{{ .Values.bot.imageName }}"
env:
- name: SCOUT_URL
value: "{{ .Values.scout.url }}"
- name: POSTGREST_URL
value: "{{ .Values.postgrest.url }}"
- name: NODE_ENV
value: production
- name: AUTOMATION_USER_JWT
valueFrom:
secretKeyRef:
name: bot
key: automationUserJwt
- name: DISCORD_TOKEN
valueFrom:
secretKeyRef:
name: bot
key: discordToken
- name: DISCORD_APPLICATION_ID
valueFrom:
secretKeyRef:
name: bot
key: discordApplicationId
- name: DISCORD_CHANNEL_ID
valueFrom:
secretKeyRef:
name: bot
key: discordChannelId
- name: DISCORD_GUILD_ID
valueFrom:
secretKeyRef:
name: bot
key: discordGuildId
- name: WORKER_CONNECTION_STRING
valueFrom:
secretKeyRef:
name: bot
key: workerConnectionString
- name: HTTP_PROXY
valueFrom:
secretKeyRef:
name: capture
key: httpProxy
resources:
limits:
cpu: 150m
memory: 512Mi
restartPolicy: Always

View File

@ -0,0 +1,87 @@
---
apiVersion: v1
kind: Pod
metadata:
name: bright
namespace: futureporn
labels:
app.kubernetes.io/name: bright
spec:
containers:
- name: bright
image: {{ .Values.bright.imageName | quote }}
env:
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: bright
key: databaseUrl
- name: SECRET_KEY_BASE
valueFrom:
secretKeyRef:
name: bright
key: secretKeyBase
- name: DATABASE_HOST
value: postgresql-primary.futureporn.svc.cluster.local
- name: PORT
value: {{ .Values.bright.port | quote }}
- name: SUPERSTREAMER_URL
value: {{ .Values.superstreamer.api.localUrl | quote }}
- name: PUBLIC_S3_ENDPOINT
value: {{ .Values.bright.s3.endpoint | quote }}
- name: SUPERSTREAMER_AUTH_TOKEN
valueFrom:
secretKeyRef:
name: superstreamer
key: authToken
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: postgresql
key: password
- name: DATABASE
value: bright
- name: MIX_ENV
value: dev
ports:
- name: web
containerPort: {{ .Values.bright.port }}
resources: {}
restartPolicy: OnFailure
---
apiVersion: v1
kind: Service
metadata:
name: bright
namespace: futureporn
annotations:
external-dns.alpha.kubernetes.io/hostname: "{{ .Values.bright.hostname }}"
spec:
type: LoadBalancer
selector:
app.kubernetes.io/name: bright
ports:
- name: web
port: {{ .Values.bright.port }}
targetPort: web
protocol: TCP
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: bright
namespace: futureporn
spec:
secretName: bright-tls
issuerRef:
name: "{{ .Values.certManager.issuer }}"
kind: ClusterIssuer
dnsNames:
- "{{ .Values.bright.hostname }}"

View File

@ -1,44 +0,0 @@
## most of keycloak's config is done thru it's Helm Chart values-overrides.yaml in ../../keycloak
## however, there are some things that said Chart doesn't handle for us, such as Certificates and traefik HTTPRoutes.
## we handle those out-of-spec things here
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: keycloak-httproute
namespace: futureporn
spec:
parentRefs:
- name: traefik-gateway
hostnames:
- keycloak.fp.sbtp.xyz
rules:
- matches:
- path:
type: PathPrefix
value: /
filters:
- type: ResponseHeaderModifier
responseHeaderModifier:
add:
- name: x-cj-was-here
value: "true"
backendRefs:
- name: keycloak
port: 8080
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: keycloak
namespace: futureporn
spec:
secretName: keycloak-tls
issuerRef:
name: {{ .Values.certManager.issuer | quote }}
kind: ClusterIssuer
dnsNames:
- {{ .Values.keycloak.hostname | quote }}

View File

@ -0,0 +1,339 @@
## we don't use this because I don't know of a good way to sync the image tag with that of the postgres pod.
## It's more foolproof to use a script activated by a button in Tilt UI
# ---
# apiVersion: batch/v1
# kind: Job
# metadata:
# name: superstreamer-database-seed
# namespace: futureporn
# spec:
# template:
# spec:
# restartPolicy: Never
# containers:
# - name: postgres-client
# image: postgres:latest
# command: ["sh", "-c"]
# args:
# - |
# psql -h postgresql-primary.futureporn.svc.cluster.local \
# -U postgres \
# -c "CREATE DATABASE sprs";
# env:
# - name: PGPASSWORD
# valueFrom:
# secretKeyRef:
# name: postgresql
# key: password
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: superstreamer-app
namespace: futureporn
spec:
replicas: 1
selector:
matchLabels:
app: superstreamer-app
template:
metadata:
labels:
app: superstreamer-app
spec:
containers:
- name: superstreamer-app
image: {{ .Values.superstreamer.app.image | quote }}
ports:
- containerPort: 52000
env:
- name: PUBLIC_API_ENDPOINT
value: http://localhost:52001
- name: PUBLIC_STITCHER_ENDPOINT
value: http://localhost:52002
- name: DATABASE_URI
valueFrom:
secretKeyRef:
name: superstreamer
key: databaseUri
- name: S3_ENDPOINT
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Endpoint
- name: S3_REGION
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Region
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
name: superstreamer
key: s3AccessKey
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
name: superstreamer
key: s3SecretKey
- name: S3_BUCKET
valueFrom:
secretKeyRef:
name: superstreamer
key: s3SecretKey
- name: PUBLIC_S3_ENDPOINT
valueFrom:
secretKeyRef:
name: superstreamer
key: publicS3Endpoint
- name: SUPER_SECRET
valueFrom:
secretKeyRef:
name: superstreamer
key: superSecret
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: superstreamer-api
namespace: futureporn
spec:
replicas: 1
selector:
matchLabels:
app: superstreamer-api
template:
metadata:
labels:
app: superstreamer-api
spec:
containers:
- name: superstreamer-api
image: {{ .Values.superstreamer.api.image | quote }}
ports:
- containerPort: 52001
env:
- name: REDIS_HOST
value: {{ .Values.superstreamer.redisUrl | quote }}
- name: REDIS_PORT
value: {{ .Values.superstreamer.redisPort | quote }}
- name: DATABASE_URI
valueFrom:
secretKeyRef:
name: superstreamer
key: databaseUri
- name: S3_ENDPOINT
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Endpoint
- name: S3_REGION
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Region
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
name: superstreamer
key: s3AccessKey
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
name: superstreamer
key: s3SecretKey
- name: S3_BUCKET
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Bucket
- name: PUBLIC_S3_ENDPOINT
valueFrom:
secretKeyRef:
name: superstreamer
key: publicS3Endpoint
- name: SUPER_SECRET
valueFrom:
secretKeyRef:
name: superstreamer
key: superSecret
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: superstreamer-stitcher
namespace: futureporn
spec:
replicas: 1
selector:
matchLabels:
app: superstreamer-stitcher
template:
metadata:
labels:
app: superstreamer-stitcher
spec:
containers:
- name: superstreamer-stitcher
image: {{ .Values.superstreamer.stitcher.image | quote }}
ports:
- containerPort: 52002
env:
- name: REDIS_HOST
value: {{ .Values.superstreamer.redisUrl | quote }}
- name: REDIS_PORT
value: {{ .Values.superstreamer.redisPort | quote }}
- name: PUBLIC_API_ENDPOINT
value: "http://localhost:52001"
- name: PUBLIC_STITCHER_ENDPOINT
value: "http://localhost:52002"
- name: DATABASE_URI
valueFrom:
secretKeyRef:
name: superstreamer
key: databaseUri
- name: S3_ENDPOINT
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Endpoint
- name: S3_REGION
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Region
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
name: superstreamer
key: s3AccessKey
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
name: superstreamer
key: s3SecretKey
- name: S3_BUCKET
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Bucket
- name: PUBLIC_S3_ENDPOINT
valueFrom:
secretKeyRef:
name: superstreamer
key: publicS3Endpoint
- name: SUPER_SECRET
valueFrom:
secretKeyRef:
name: superstreamer
key: superSecret
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: superstreamer-artisan
namespace: futureporn
spec:
replicas: 1
selector:
matchLabels:
app: superstreamer-artisan
template:
metadata:
labels:
app: superstreamer-artisan
spec:
containers:
- name: superstreamer-artisan
image: {{ .Values.superstreamer.artisan.image | quote }}
env:
- name: REDIS_HOST
value: {{ .Values.superstreamer.redisUrl | quote }}
- name: REDIS_PORT
value: {{ .Values.superstreamer.redisPort | quote }}
- name: DATABASE_URI
valueFrom:
secretKeyRef:
name: superstreamer
key: databaseUri
- name: S3_ENDPOINT
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Endpoint
- name: S3_REGION
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Region
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
name: superstreamer
key: s3AccessKey
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
name: superstreamer
key: s3SecretKey
- name: S3_BUCKET
valueFrom:
secretKeyRef:
name: superstreamer
key: s3Bucket
- name: PUBLIC_S3_ENDPOINT
valueFrom:
secretKeyRef:
name: superstreamer
key: publicS3Endpoint
- name: SUPER_SECRET
valueFrom:
secretKeyRef:
name: superstreamer
key: superSecret
---
apiVersion: v1
kind: Service
metadata:
name: superstreamer-app
namespace: futureporn
spec:
selector:
app: superstreamer-app
ports:
- protocol: TCP
port: 52000
targetPort: 52000
---
apiVersion: v1
kind: Service
metadata:
name: superstreamer-api
namespace: futureporn
spec:
selector:
app: superstreamer-api
ports:
- protocol: TCP
port: 52001
targetPort: 52001
---
apiVersion: v1
kind: Service
metadata:
name: superstreamer-stitcher
namespace: futureporn
spec:
selector:
app: superstreamer-stitcher
ports:
- protocol: TCP
port: 52002
targetPort: 52002

View File

@ -1,163 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: uppy
namespace: futureporn
spec:
replicas: {{ .Values.uppy.replicas }}
minReadySeconds: 5
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 2
maxUnavailable: 1
selector:
matchLabels:
app: uppy
template:
metadata:
labels:
app: uppy
spec:
containers:
- name: uppy
image: docker.io/transloadit/companion:latest
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 150Mi
requests:
memory: 100Mi
env:
- name: COMPANION_STREAMING_UPLOAD
value: "true"
- name: COMPANION_CLIENT_ORIGINS
value: "{{ .Values.uppy.clientOrigins }}"
- name: COMPANION_DATADIR
value: /tmp/
- name: COMPANION_DOMAIN
value: "{{ .Values.uppy.domain }}"
- name: COMPANION_PROTOCOL
value: https
- name: COMPANION_REDIS_URL
value: "{{ .Values.uppy.redisUrl }}"
- name: COMPANION_SECRET
valueFrom:
secretKeyRef:
name: uppy
key: secret
- name: COMPANION_PREAUTH_SECRET
valueFrom:
secretKeyRef:
name: uppy
key: preAuthSecret
- name: COMPANION_DROPBOX_KEY
valueFrom:
secretKeyRef:
name: uppy
key: dropboxKey
- name: COMPANION_DROPBOX_SECRET
valueFrom:
secretKeyRef:
name: uppy
key: dropboxSecret
- name: COMPANION_BOX_KEY
valueFrom:
secretKeyRef:
name: uppy
key: boxKey
- name: COMPANION_BOX_SECRET
valueFrom:
secretKeyRef:
name: uppy
key: boxSecret
- name: COMPANION_GOOGLE_KEY
valueFrom:
secretKeyRef:
name: uppy
key: googleKey
- name: COMPANION_GOOGLE_SECRET
valueFrom:
secretKeyRef:
name: uppy
key: googleSecret
- name: COMPANION_AWS_KEY
valueFrom:
secretKeyRef:
name: uppy
key: awsKey
- name: COMPANION_AWS_SECRET
valueFrom:
secretKeyRef:
name: uppy
key: awsSecret
- name: COMPANION_AWS_BUCKET
value: "{{ .Values.uppy.s3.bucket }}"
- name: COMPANION_AWS_REGION
value: "{{ .Values.uppy.s3.region }}"
- name: COMPANION_AWS_ENDPOINT
value: "{{ .Values.uppy.s3.endpoint }}"
# - name: COMPANION_AWS_PREFIX
# value: "{{ .Values.uppy.s3.prefix }}"
## COMPANION_OAUTH_DOMAIN is only necessary if using a different domain per each uppy pod.
## We don't need this because we are load balancing the pods so they all use the same domain name.
## @see https://github.com/transloadit/uppy/blob/f4dd3d534ff4378f3a2f73fe327358bcbde74059/docs/companion.md#server
- name: COMPANION_OAUTH_DOMAIN
value: ''
- name: COMPANION_PATH
value: ''
- name: COMPANION_IMPLICIT_PATH
value: ''
- name: COMPANION_DOMAINS
value: ''
## https://uppy.io/docs/companion/#uploadurls-companion_upload_urls
- name: COMPANION_UPLOAD_URLS
value: "{{ .Values.uppy.uploadUrls }}"
ports:
- containerPort: 3020
volumeMounts:
- name: uppy-data
mountPath: /mnt/uppy-data
volumes:
- name: uppy-data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: uppy
namespace: futureporn
annotations:
external-dns.alpha.kubernetes.io/hostname: "{{ .Values.uppy.hostname }}"
spec:
type: LoadBalancer
ports:
- port: 3020
targetPort: 3020
protocol: TCP
selector:
app: uppy
# Welcome to Companion v4.15.1
# ===================================
# Congratulations on setting up Companion! Thanks for joining our cause, you have taken
# the first step towards the future of file uploading! We
# hope you are as excited about this as we are!
# While you did an awesome job on getting Companion running, this is just the welcome
# message, so let's talk about the places that really matter:
# - Be sure to add the following URLs as your Oauth redirect uris on their corresponding developer interfaces:
# https://uppy.fp.sbtp.xyz/drive/redirect, https://uppy.fp.sbtp.xyz/googlephotos/redirect, https://uppy.fp.sbtp.xyz/dropbox/redirect, https://uppy.fp.sbtp.xyz/box/redirect, https://uppy.fp.sbtp.xyz/instagram/redirect, https://uppy.fp.sbtp.xyz/facebook/redirect, https://uppy.fp.sbtp.xyz/onedrive/redirect, https://uppy.fp.sbtp.xyz/zoom/redirect, https://uppy.fp.sbtp.xyz/unsplash/redirect
# - The URL https://uppy.fp.sbtp.xyz/metrics is available for statistics to keep Companion running smoothly
# - https://github.com/transloadit/uppy/issues - report your bugs here
# So quit lollygagging, start uploading and experience the future!

View File

@ -7,10 +7,10 @@ environment: development
# storageClassName: csi-hostpath-sc # used by minikube
storageClassName: standard # used by Kind
s3:
endpoint: https://s3.us-west-000.backblazeb2.com
region: us-west-000
endpoint: https://futureporn.hel1.your-objectstorage.com
region: hel1
buckets:
main: fp-dev
main: futureporn
usc: fp-usc-dev
backup: futureporn-db-backup-dev
link2cid:
@ -30,8 +30,8 @@ capture:
mailbox:
imageName: fp/mailbox
replicas: 1
cdnBucketUrl: https://fp-dev.b-cdn.net
s3BucketName: fp-dev
cdnBucketUrl: https://futureporn.hel1.your-objectstorage.com
s3BucketName: futureporn
port: 5000
factory:
replicas: 1
@ -52,16 +52,31 @@ realtime:
adminEmail: cj@futureporn.net
echo:
hostname: echo.fp.sbtp.xyz
superstreamer:
redisUrl: redis-master.futureporn.svc.cluster.local
redisPort: 6379
app:
#image: fp/superstreamer-app
image: "superstreamerapp/app:alpha"
api:
#image: fp/superstreamer-api
localUrl: http://superstreamer-api.futureporn.svc.cluster.local:52001
image: "superstreamerapp/api:alpha"
artisan:
#image: fp/superstreamer-artisan
image: "superstreamerapp/artisan:alpha"
stitcher:
#image: fp/superstreamer-stitcher
image: "superstreamerapp/stitcher:alpha"
uppy:
replicas: 3
hostname: uppy.fp.sbtp.xyz
imageName: fp/uppy
redisUrl: redis-master.futureporn.svc.cluster.local
s3:
endpoint: https://s3.us-west-000.backblazeb2.com
bucket: fp-usc-dev
region: us-west-000
prefix: s3
endpoint: your-objectstorage.com
bucket: futureporn
region: hel1
clientOrigins: next.fp.sbtp.xyz
domain: uppy.fp.sbtp.xyz
uploadUrls: https://uppy.fp.sbtp.xyz/files
@ -96,11 +111,7 @@ supertokens:
port: 3348
hostname: supertokens.fp.sbtp.xyz
replicas: 1
keycloak:
hostname: keycloak.fp.sbtp.xyz
localUrl: http://keycloak.futureporn.svc.cluster.local:8080
replicas: 1
issuer: https://keycloak.fp.sbtp.xyz/realms/futureporn
logto:
admin:
port: 3002
@ -116,3 +127,12 @@ migrations:
imageName: fp/migrations-schema
data:
imageName: fp/migrations-data
authentik:
  replicas: 1
hostname: auth.fp.sbtp.xyz
bright:
imageName: fp/bright
hostname: bright.fp.sbtp.xyz
port: 4000
s3:
endpoint: https://fp-dev.b-cdn.net

View File

@ -1,25 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# img folder
img/
# Changelog
CHANGELOG.md

View File

@ -1,9 +0,0 @@
dependencies:
- name: postgresql
repository: oci://registry-1.docker.io/bitnamicharts
version: 16.2.2
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.27.0
digest: sha256:80a30494e1385f132dc70f43bf342dfbfc2250d4bea81ddea4de831617245d75
generated: "2024-11-22T07:51:44.565506689Z"

View File

@ -1,35 +0,0 @@
annotations:
category: DeveloperTools
images: |
- name: keycloak
image: docker.io/bitnami/keycloak:26.0.6-debian-12-r0
- name: keycloak-config-cli
image: docker.io/bitnami/keycloak-config-cli:6.1.6-debian-12-r6
licenses: Apache-2.0
apiVersion: v2
appVersion: 26.0.6
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: oci://registry-1.docker.io/bitnamicharts
version: 16.x.x
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
tags:
- bitnami-common
version: 2.x.x
description: Keycloak is a high performance Java-based identity and access management
solution. It lets developers add an authentication layer to their applications with
minimum effort.
home: https://bitnami.com
icon: https://bitnami.com/assets/stacks/keycloak/img/keycloak-stack-220x234.png
keywords:
- keycloak
- access-management
maintainers:
- name: Broadcom, Inc. All Rights Reserved.
url: https://github.com/bitnami/charts
name: keycloak
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/keycloak
version: 24.2.2

View File

@ -1,823 +0,0 @@
<!--- app-name: Keycloak -->
# Bitnami package for Keycloak
Keycloak is a high performance Java-based identity and access management solution. It lets developers add an authentication layer to their applications with minimum effort.
[Overview of Keycloak](https://www.keycloak.org/)
Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
## TL;DR
```console
helm install my-release oci://registry-1.docker.io/bitnamicharts/keycloak
```
Looking to use Keycloak in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog.
## Introduction
Bitnami charts for Helm are carefully engineered, actively maintained and are the quickest and easiest way to deploy containers on a Kubernetes cluster that are ready to handle production workloads.
This chart bootstraps a [Keycloak](https://github.com/bitnami/containers/tree/main/bitnami/keycloak) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
## Prerequisites
- Kubernetes 1.23+
- Helm 3.8.0+
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/keycloak
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
These commands deploy a Keycloak application on the Kubernetes cluster in the default configuration.
> **Tip**: List all releases using `helm list`
## Configuration and installation details
### Resource requests and limits
Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case.
To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcePreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
### [Rolling vs Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
### Use an external database
Sometimes, you may want to have Keycloak connect to an external PostgreSQL database rather than a database within your cluster - for example, when using a managed database service, or when running a single database server for all your applications. To do this, set the `postgresql.enabled` parameter to `false` and specify the credentials for the external database using the `externalDatabase.*` parameters. Here is an example:
```text
postgresql.enabled=false
externalDatabase.host=myexternalhost
externalDatabase.user=myuser
externalDatabase.password=mypassword
externalDatabase.database=mydatabase
externalDatabase.port=5432
```
> NOTE: Only PostgreSQL database server is supported as external database
It is not supported but possible to run Keycloak with an external MSSQL database with the following settings:
```yaml
externalDatabase:
host: "mssql.example.com"
port: 1433
user: keycloak
database: keycloak
existingSecret: passwords
extraEnvVars:
- name: KC_DB # override values from the conf file
value: 'mssql'
- name: KC_DB_URL
value: 'jdbc:sqlserver://mssql.example.com:1433;databaseName=keycloak;'
```
### Importing and exporting a realm
#### Importing a realm
You can import a realm by setting the `KEYCLOAK_EXTRA_ARGS` to contain the `--import-realm` argument.
This will import all `*.json` files under `/opt/bitnami/keycloak/data/import` as realms into keycloak as per the
official documentation [here](https://www.keycloak.org/server/importExport#_importing_a_realm_from_a_directory). You
can supply the files by mounting a volume e.g. with docker compose as follows:
```yaml
keycloak:
image: bitnami/keycloak:latest
volumes:
- /local/path/to/realms/folder:/opt/bitnami/keycloak/data/import
```
#### Exporting a realm
You can export a realm through the GUI, but it will not export users even if the option is set; this is a known keycloak
[bug](https://github.com/keycloak/keycloak/issues/23970).
By using the `kc.sh` script you can export a realm with users. Be sure to mount the export folder to a local folder:
```yaml
keycloak:
image: bitnami/keycloak:latest
volumes:
- /local/path/to/export/folder:/export
```
Then open a terminal in the running keycloak container and run:
```bash
kc.sh export --dir /export/ --users realm_file
```
This will export all the realms with users to the `/export` folder.
### Configure Ingress
This chart provides support for Ingress resources. If you have an ingress controller installed on your cluster, such as [nginx-ingress-controller](https://github.com/bitnami/charts/tree/main/bitnami/nginx-ingress-controller) or [contour](https://github.com/bitnami/charts/tree/main/bitnami/contour) you can utilize the ingress controller to serve your application. To enable Ingress integration, set `ingress.enabled` to `true`.
The most common scenario is to have one host name mapped to the deployment. In this case, the `ingress.hostname` property can be used to set the host name. The `ingress.tls` parameter can be used to add the TLS configuration for this host.
However, it is also possible to have more than one host. To facilitate this, the `ingress.extraHosts` parameter (if available) can be set with the host names specified as an array. The `ingress.extraTLS` parameter (if available) can also be used to add the TLS configuration for extra hosts.
> NOTE: For each host specified in the `ingress.extraHosts` parameter, it is necessary to set a name, path, and any annotations that the Ingress controller should know about. Not all annotations are supported by all Ingress controllers, but [this annotation reference document](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md) lists the annotations supported by many popular Ingress controllers.
Adding the TLS parameter (where available) will cause the chart to generate HTTPS URLs, and the application will be available on port 443. The actual TLS secrets do not have to be generated by this chart. However, if TLS is enabled, the Ingress record will not work until the TLS secret exists.
[Learn more about Ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/).
### Configure admin Ingress
In addition to the Ingress resource described above, this chart also provides the ability to define an Ingress for the admin area of Keycloak, for example the `master` realm.
For this scenario, you can use the Keycloak Config CLI integration with the following values, where `keycloak-admin.example.com` is to be replaced by the actual hostname:
```yaml
adminIngress:
enabled: true
hostname: keycloak-admin.example.com
keycloakConfigCli:
enabled: true
configuration:
master.json: |
{
"realm" : "master",
"attributes": {
"frontendUrl": "https://keycloak-admin.example.com"
}
}
```
### Configure TLS Secrets for use with Ingress
This chart facilitates the creation of TLS secrets for use with the Ingress controller (although this is not mandatory). There are several common use cases:
- Generate certificate secrets based on chart parameters.
- Enable externally generated certificates.
- Manage application certificates via an external service (like [cert-manager](https://github.com/jetstack/cert-manager/)).
- Create self-signed certificates within the chart (if supported).
In the first two cases, a certificate and a key are needed. Files are expected in `.pem` format.
Here is an example of a certificate file:
> NOTE: There may be more than one certificate if there is a certificate chain.
```text
-----BEGIN CERTIFICATE-----
MIID6TCCAtGgAwIBAgIJAIaCwivkeB5EMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV
...
jScrvkiBO65F46KioCL9h5tDvomdU1aqpI/CBzhvZn1c0ZTf87tGQR8NK7v7
-----END CERTIFICATE-----
```
Here is an example of a certificate key:
```text
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAvLYcyu8f3skuRyUgeeNpeDvYBCDcgq+LsWap6zbX5f8oLqp4
...
wrj2wDbCDCFmfqnSJ+dKI3vFLlEz44sAV8jX/kd4Y6ZTQhlLbYc=
-----END RSA PRIVATE KEY-----
```
- If using Helm to manage the certificates based on the parameters, copy these values into the `certificate` and `key` values for a given `*.ingress.secrets` entry.
- If managing TLS secrets separately, it is necessary to create a TLS secret with name `INGRESS_HOSTNAME-tls` (where INGRESS_HOSTNAME is a placeholder to be replaced with the hostname you set using the `*.ingress.hostname` parameter).
- If your cluster has a [cert-manager](https://github.com/jetstack/cert-manager) add-on to automate the management and issuance of TLS certificates, add to `*.ingress.annotations` the [corresponding ones](https://cert-manager.io/docs/usage/ingress/#supported-annotations) for cert-manager.
- If using self-signed certificates created by Helm, set both `*.ingress.tls` and `*.ingress.selfSigned` to `true`.
### Use with ingress offloading SSL
If your ingress controller has the SSL Termination, you should set `proxy` to `edge`.
### Manage secrets and passwords
This chart provides several ways to manage passwords:
- Values passed to the chart: In this scenario, a new secret including all the passwords will be created during the chart installation. When upgrading, it is necessary to provide the secrets to the chart as shown below. Replace the KEYCLOAK_ADMIN_PASSWORD, POSTGRESQL_PASSWORD and POSTGRESQL_PVC placeholders with the correct passwords and PVC name.
```console
helm upgrade keycloak bitnami/keycloak \
--set auth.adminPassword=KEYCLOAK_ADMIN_PASSWORD \
--set postgresql.postgresqlPassword=POSTGRESQL_PASSWORD \
--set postgresql.persistence.existingClaim=POSTGRESQL_PVC
```
- An existing secret with all the passwords via the `existingSecret` parameter.
### Add extra environment variables
In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
```yaml
extraEnvVars:
- name: KEYCLOAK_LOG_LEVEL
value: DEBUG
```
Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
### Use Sidecars and Init Containers
If additional containers are needed in the same pod (such as additional metrics or logging exporters), they can be defined using the `sidecars` config parameter.
```yaml
sidecars:
- name: your-image-name
image: your-image
imagePullPolicy: Always
ports:
- name: portname
containerPort: 1234
```
If these sidecars export extra ports, extra port definitions can be added using the `service.extraPorts` parameter (where available), as shown in the example below:
```yaml
service:
extraPorts:
- name: extraPort
port: 11311
targetPort: 11311
```
> NOTE: This Helm chart already includes sidecar containers for the Prometheus exporters (where applicable). These can be activated by adding the `--enable-metrics=true` parameter at deployment time. The `sidecars` parameter should therefore only be used for any extra sidecar containers.
If additional init containers are needed in the same pod, they can be defined using the `initContainers` parameter. Here is an example:
```yaml
initContainers:
- name: your-image-name
image: your-image
imagePullPolicy: Always
ports:
- name: portname
containerPort: 1234
```
Learn more about [sidecar containers](https://kubernetes.io/docs/concepts/workloads/pods/) and [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
### Initialize a fresh instance
The [Bitnami Keycloak](https://github.com/bitnami/containers/tree/main/bitnami/keycloak) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `initdbScripts` parameter as dict.
In addition to this option, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the previous option.
The allowed extension is `.sh`.
### Deploy extra resources
There are cases where you may want to deploy extra objects, such a ConfigMap containing your app's configuration or some extra deployment with a micro service used by your app. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter.
### Set Pod affinity
This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
## Parameters
### Global parameters
| Name | Description | Value |
| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` |
| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` |
| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` |
### Common parameters
| Name | Description | Value |
| ------------------------ | --------------------------------------------------------------------------------------- | --------------- |
| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` |
| `nameOverride` | String to partially override common.names.fullname | `""` |
| `fullnameOverride` | String to fully override common.names.fullname | `""` |
| `namespaceOverride` | String to fully override common.names.namespace | `""` |
| `commonLabels` | Labels to add to all deployed objects | `{}` |
| `enableServiceLinks` | If set to false, disable Kubernetes service links in the pod spec | `true` |
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
| `dnsPolicy` | DNS Policy for pod | `""` |
| `dnsConfig` | DNS Configuration pod | `{}` |
| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
| `diagnosticMode.command` | Command to override all containers in the statefulset                                    | `["sleep"]`     |
| `diagnosticMode.args`    | Args to override all containers in the statefulset                                       | `["infinity"]`  |
### Keycloak parameters
| Name | Description | Value |
| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------- |
| `image.registry` | Keycloak image registry | `REGISTRY_NAME` |
| `image.repository` | Keycloak image repository | `REPOSITORY_NAME/keycloak` |
| `image.digest` | Keycloak image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Keycloak image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `image.debug` | Specify if debug logs should be enabled | `false` |
| `auth.adminUser` | Keycloak administrator user | `user` |
| `auth.adminPassword` | Keycloak administrator password for the new user | `""` |
| `auth.existingSecret` | Existing secret containing Keycloak admin password | `""` |
| `auth.passwordSecretKey` | Key where the Keycloak admin password is being stored inside the existing secret. | `""` |
| `auth.annotations` | Additional custom annotations for Keycloak auth secret object | `{}` |
| `customCaExistingSecret` | Name of the secret containing the Keycloak custom CA certificates. The secret will be mounted as a directory and configured using KC_TRUSTSTORE_PATHS. | `""` |
| `tls.enabled` | Enable TLS encryption. Required for HTTPs traffic. | `false` |
| `tls.autoGenerated` | Generate automatically self-signed TLS certificates. Currently only supports PEM certificates | `false` |
| `tls.existingSecret` | Existing secret containing the TLS certificates per Keycloak replica | `""` |
| `tls.usePem` | Use PEM certificates as input instead of PKS12/JKS stores | `false` |
| `tls.truststoreFilename` | Truststore filename inside the existing secret | `keycloak.truststore.jks` |
| `tls.keystoreFilename` | Keystore filename inside the existing secret | `keycloak.keystore.jks` |
| `tls.keystorePassword` | Password to access the keystore when it's password-protected | `""` |
| `tls.truststorePassword` | Password to access the truststore when it's password-protected | `""` |
| `tls.passwordsSecret` | Secret containing the Keystore and Truststore passwords. | `""` |
| `spi.existingSecret` | Existing secret containing the Keycloak truststore for SPI connection over HTTPS/TLS | `""` |
| `spi.truststorePassword` | Password to access the truststore when it's password-protected | `""` |
| `spi.truststoreFilename` | Truststore filename inside the existing secret | `keycloak-spi.truststore.jks` |
| `spi.passwordsSecret` | Secret containing the SPI Truststore passwords. | `""` |
| `spi.hostnameVerificationPolicy` | Verify the hostname of the server's certificate. Allowed values: "ANY", "WILDCARD", "STRICT". | `""` |
| `adminRealm` | Name of the admin realm | `master` |
| `production` | Run Keycloak in production mode. TLS configuration is required except when using proxy=edge. | `false` |
| `proxyHeaders` | Set Keycloak proxy headers | `""` |
| `proxy` | reverse Proxy mode edge, reencrypt, passthrough or none | `""` |
| `httpRelativePath` | Set the path relative to '/' for serving resources. Useful if you are migrating from older version which were using '/auth/' | `/` |
| `configuration` | Keycloak Configuration. Auto-generated based on other parameters when not specified | `""` |
| `existingConfigmap` | Name of existing ConfigMap with Keycloak configuration | `""` |
| `extraStartupArgs` | Extra default startup args | `""` |
| `enableDefaultInitContainers` | Deploy default init containers | `true` |
| `initdbScripts` | Dictionary of initdb scripts | `{}` |
| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `""` |
| `command` | Override default container command (useful when using custom images) | `[]` |
| `args` | Override default container args (useful when using custom images) | `[]` |
| `extraEnvVars` | Extra environment variables to be set on Keycloak container | `[]` |
| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` |
| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars | `""` |
### Keycloak statefulset parameters
| Name | Description | Value |
| --------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
| `replicaCount` | Number of Keycloak replicas to deploy | `1` |
| `revisionHistoryLimitCount` | Number of controller revisions to keep | `10` |
| `containerPorts.http` | Keycloak HTTP container port | `8080` |
| `containerPorts.https` | Keycloak HTTPS container port | `8443` |
| `containerPorts.metrics` | Keycloak metrics container port | `9000` |
| `extraContainerPorts` | Optionally specify extra list of additional port-mappings for Keycloak container | `[]` |
| `statefulsetAnnotations` | Optionally add extra annotations on the statefulset resource | `{}` |
| `podSecurityContext.enabled` | Enabled Keycloak pods' Security Context | `true` |
| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
| `podSecurityContext.fsGroup` | Set Keycloak pod's Security Context fsGroup | `1001` |
| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` |
| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `livenessProbe.enabled` | Enable livenessProbe on Keycloak containers | `true` |
| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `300` |
| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `1` |
| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `readinessProbe.enabled` | Enable readinessProbe on Keycloak containers | `true` |
| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` |
| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` |
| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `startupProbe.enabled` | Enable startupProbe on Keycloak containers | `false` |
| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
| `startupProbe.periodSeconds` | Period seconds for startupProbe | `5` |
| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `60` |
| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `customLivenessProbe` | Custom Liveness probes for Keycloak | `{}` |
| `customReadinessProbe`                              | Custom Readiness probes for Keycloak                                                                                                                                                                                             | `{}`             |
| `customStartupProbe` | Custom Startup probes for Keycloak | `{}` |
| `lifecycleHooks` | LifecycleHooks to set additional configuration at startup | `{}` |
| `automountServiceAccountToken` | Mount Service Account token in pod | `true` |
| `hostAliases` | Deployment pod host aliases | `[]` |
| `podLabels` | Extra labels for Keycloak pods | `{}` |
| `podAnnotations` | Annotations for Keycloak pods | `{}` |
| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
| `affinity` | Affinity for pod assignment | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Tolerations for pod assignment | `[]` |
| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `podManagementPolicy` | Pod management policy for the Keycloak statefulset | `Parallel` |
| `priorityClassName` | Keycloak pods' Priority Class Name | `""` |
| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
| `terminationGracePeriodSeconds` | Seconds Keycloak pod needs to terminate gracefully | `""` |
| `updateStrategy.type` | Keycloak statefulset strategy type | `RollingUpdate` |
| `updateStrategy.rollingUpdate` | Keycloak statefulset rolling update configuration parameters | `{}` |
| `minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` |
| `extraVolumes` | Optionally specify extra list of additional volumes for Keycloak pods | `[]` |
| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Keycloak container(s) | `[]` |
| `initContainers` | Add additional init containers to the Keycloak pods | `[]` |
| `sidecars` | Add additional sidecar containers to the Keycloak pods | `[]` |
### Exposure parameters
| Name | Description | Value |
| --------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
| `service.type` | Kubernetes service type | `ClusterIP` |
| `service.http.enabled` | Enable http port on service | `true` |
| `service.ports.http` | Keycloak service HTTP port | `80` |
| `service.ports.https` | Keycloak service HTTPS port | `443` |
| `service.nodePorts` | Specify the nodePort values for the LoadBalancer and NodePort service types. | `{}` |
| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `service.clusterIP` | Keycloak service clusterIP IP | `""` |
| `service.loadBalancerIP`                | loadBalancerIP for the Keycloak Service (optional, cloud specific)                                                                 | `""`                     |
| `service.loadBalancerSourceRanges`      | Addresses that are allowed when service is LoadBalancer                                                                            | `[]`                     |
| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
| `service.annotations` | Additional custom annotations for Keycloak service | `{}` |
| `service.extraPorts` | Extra port to expose on Keycloak service | `[]` |
| `service.extraHeadlessPorts` | Extra ports to expose on Keycloak headless service | `[]` |
| `service.headless.annotations` | Annotations for the headless service. | `{}` |
| `service.headless.extraPorts` | Extra ports to expose on Keycloak headless service | `[]` |
| `ingress.enabled` | Enable ingress record generation for Keycloak | `false` |
| `ingress.ingressClassName`              | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)                                                         | `""`                     |
| `ingress.pathType` | Ingress path type | `ImplementationSpecific` |
| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` |
| `ingress.controller` | The ingress controller type. Currently supports `default` and `gce` | `default` |
| `ingress.hostname` | Default host for the ingress record (evaluated as template) | `keycloak.local` |
| `ingress.hostnameStrict` | Disables dynamically resolving the hostname from request headers. | `false` |
| `ingress.path` | Default path for the ingress record (evaluated as template) | `""` |
| `ingress.servicePort` | Backend service port to use | `http` |
| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
| `ingress.labels` | Additional labels for the Ingress resource. | `{}` |
| `ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` |
| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
| `ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` |
| `ingress.extraPaths` | Any additional arbitrary paths that may need to be added to the ingress under the main host. | `[]` |
| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` |
| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
| `adminIngress.enabled` | Enable admin ingress record generation for Keycloak | `false` |
| `adminIngress.ingressClassName`         | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)                                                         | `""`                     |
| `adminIngress.pathType` | Ingress path type | `ImplementationSpecific` |
| `adminIngress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` |
| `adminIngress.controller` | The ingress controller type. Currently supports `default` and `gce` | `default` |
| `adminIngress.hostname` | Default host for the admin ingress record (evaluated as template) | `keycloak.local` |
| `adminIngress.path` | Default path for the admin ingress record (evaluated as template) | `""` |
| `adminIngress.servicePort` | Backend service port to use | `http` |
| `adminIngress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
| `adminIngress.labels` | Additional labels for the Ingress resource. | `{}` |
| `adminIngress.tls` | Enable TLS configuration for the host defined at `adminIngress.hostname` parameter | `false` |
| `adminIngress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
| `adminIngress.extraHosts` | An array with additional hostname(s) to be covered with the admin ingress record | `[]` |
| `adminIngress.extraPaths` | Any additional arbitrary paths that may need to be added to the admin ingress under the main host. | `[]` |
| `adminIngress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` |
| `adminIngress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` |
| `adminIngress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` |
| `networkPolicy.allowExternal` | Don't require server label for connections | `true` |
| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` |
| `networkPolicy.kubeAPIServerPorts` | List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) | `[]` |
| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
| `networkPolicy.extraEgress`             | Add extra egress rules to the NetworkPolicy                                                                                        | `[]`                     |
| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` |
| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` |
### RBAC parameter
| Name | Description | Value |
| --------------------------------------------- | --------------------------------------------------------- | ------- |
| `serviceAccount.create` | Enable the creation of a ServiceAccount for Keycloak pods | `true` |
| `serviceAccount.name` | Name of the created ServiceAccount | `""` |
| `serviceAccount.automountServiceAccountToken` | Auto-mount the service account token in the pod | `false` |
| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
| `serviceAccount.extraLabels` | Additional labels for the ServiceAccount | `{}` |
| `rbac.create` | Whether to create and use RBAC resources or not | `false` |
| `rbac.rules` | Custom RBAC rules | `[]` |
### Other parameters
| Name | Description | Value |
| ----------------------------------------------------------- | -------------------------------------------------------------------------------------------- | ------- |
| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` |
| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` |
| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` |
| `autoscaling.enabled` | Enable autoscaling for Keycloak | `false` |
| `autoscaling.minReplicas` | Minimum number of Keycloak replicas | `1` |
| `autoscaling.maxReplicas` | Maximum number of Keycloak replicas | `11` |
| `autoscaling.targetCPU` | Target CPU utilization percentage | `""` |
| `autoscaling.targetMemory` | Target Memory utilization percentage | `""` |
| `autoscaling.behavior.scaleUp.stabilizationWindowSeconds` | The number of seconds for which past recommendations should be considered while scaling up | `120` |
| `autoscaling.behavior.scaleUp.selectPolicy` | The priority of policies that the autoscaler will apply when scaling up | `Max` |
| `autoscaling.behavior.scaleUp.policies` | HPA scaling policies when scaling up | `[]` |
| `autoscaling.behavior.scaleDown.stabilizationWindowSeconds` | The number of seconds for which past recommendations should be considered while scaling down | `300` |
| `autoscaling.behavior.scaleDown.selectPolicy` | The priority of policies that the autoscaler will apply when scaling down | `Max` |
| `autoscaling.behavior.scaleDown.policies` | HPA scaling policies when scaling down | `[]` |
### Metrics parameters
| Name | Description | Value |
| ------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
| `metrics.enabled` | Enable exposing Keycloak statistics | `false` |
| `metrics.service.ports.http` | Metrics service HTTP port | `8080` |
| `metrics.service.ports.https` | Metrics service HTTPS port | `8443` |
| `metrics.service.ports.metrics` | Metrics service Metrics port | `9000` |
| `metrics.service.annotations` | Annotations for enabling prometheus to access the metrics endpoints | `{}` |
| `metrics.service.extraPorts`               | Add additional ports to the keycloak metrics service (e.g. admin port 9000)                                                                         | `[]`      |
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` |
| `metrics.serviceMonitor.port` | Metrics service HTTP port | `metrics` |
| `metrics.serviceMonitor.scheme` | Metrics service scheme | `http` |
| `metrics.serviceMonitor.tlsConfig` | Metrics service TLS configuration | `{}` |
| `metrics.serviceMonitor.endpoints` | The endpoint configuration of the ServiceMonitor. Path is mandatory. Port, scheme, tlsConfig, interval, timeout and labellings can be overwritten. | `[]` |
| `metrics.serviceMonitor.path` | Metrics service HTTP path. Deprecated: Use @param metrics.serviceMonitor.endpoints instead | `""` |
| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` |
| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` |
| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` |
| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` |
| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` |
| `metrics.prometheusRule.enabled` | Create PrometheusRule Resource for scraping metrics using PrometheusOperator | `false` |
| `metrics.prometheusRule.namespace` | Namespace which Prometheus is running in | `""` |
| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.groups` | Groups, containing the alert rules. | `[]` |
### keycloak-config-cli parameters
| Name | Description | Value |
| --------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------- |
| `keycloakConfigCli.enabled` | Whether to enable keycloak-config-cli job | `false` |
| `keycloakConfigCli.image.registry` | keycloak-config-cli container image registry | `REGISTRY_NAME` |
| `keycloakConfigCli.image.repository` | keycloak-config-cli container image repository | `REPOSITORY_NAME/keycloak-config-cli` |
| `keycloakConfigCli.image.digest` | keycloak-config-cli container image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `keycloakConfigCli.image.pullPolicy` | keycloak-config-cli container image pull policy | `IfNotPresent` |
| `keycloakConfigCli.image.pullSecrets` | keycloak-config-cli container image pull secrets | `[]` |
| `keycloakConfigCli.annotations` | Annotations for keycloak-config-cli job | `{}` |
| `keycloakConfigCli.command` | Command for running the container (set to default if not set). Use array form | `[]` |
| `keycloakConfigCli.args` | Args for running the container (set to default if not set). Use array form | `[]` |
| `keycloakConfigCli.automountServiceAccountToken` | Mount Service Account token in pod | `true` |
| `keycloakConfigCli.hostAliases` | Job pod host aliases | `[]` |
| `keycloakConfigCli.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if keycloakConfigCli.resources is set (keycloakConfigCli.resources is recommended for production). | `small` |
| `keycloakConfigCli.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `keycloakConfigCli.containerSecurityContext.enabled` | Enabled keycloak-config-cli Security Context | `true` |
| `keycloakConfigCli.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `keycloakConfigCli.containerSecurityContext.runAsUser` | Set keycloak-config-cli Security Context runAsUser | `1001` |
| `keycloakConfigCli.containerSecurityContext.runAsGroup` | Set keycloak-config-cli Security Context runAsGroup | `1001` |
| `keycloakConfigCli.containerSecurityContext.runAsNonRoot` | Set keycloak-config-cli Security Context runAsNonRoot | `true` |
| `keycloakConfigCli.containerSecurityContext.privileged` | Set keycloak-config-cli Security Context privileged | `false` |
| `keycloakConfigCli.containerSecurityContext.readOnlyRootFilesystem` | Set keycloak-config-cli Security Context readOnlyRootFilesystem | `true` |
| `keycloakConfigCli.containerSecurityContext.allowPrivilegeEscalation` | Set keycloak-config-cli Security Context allowPrivilegeEscalation | `false` |
| `keycloakConfigCli.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `keycloakConfigCli.containerSecurityContext.seccompProfile.type` | Set keycloak-config-cli Security Context seccomp profile | `RuntimeDefault` |
| `keycloakConfigCli.podSecurityContext.enabled` | Enabled keycloak-config-cli pods' Security Context | `true` |
| `keycloakConfigCli.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
| `keycloakConfigCli.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
| `keycloakConfigCli.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
| `keycloakConfigCli.podSecurityContext.fsGroup` | Set keycloak-config-cli pod's Security Context fsGroup | `1001` |
| `keycloakConfigCli.backoffLimit` | Number of retries before considering a Job as failed | `1` |
| `keycloakConfigCli.podLabels` | Pod extra labels | `{}` |
| `keycloakConfigCli.podAnnotations` | Annotations for job pod | `{}` |
| `keycloakConfigCli.extraEnvVars` | Additional environment variables to set | `[]` |
| `keycloakConfigCli.nodeSelector` | Node labels for pod assignment | `{}` |
| `keycloakConfigCli.podTolerations` | Tolerations for job pod assignment | `[]` |
| `keycloakConfigCli.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` |
| `keycloakConfigCli.extraEnvVarsSecret` | Secret with extra environment variables | `""` |
| `keycloakConfigCli.extraVolumes` | Extra volumes to add to the job | `[]` |
| `keycloakConfigCli.extraVolumeMounts` | Extra volume mounts to add to the container | `[]` |
| `keycloakConfigCli.initContainers` | Add additional init containers to the Keycloak config cli pod | `[]` |
| `keycloakConfigCli.sidecars` | Add additional sidecar containers to the Keycloak config cli pod | `[]` |
| `keycloakConfigCli.configuration` | keycloak-config-cli realms configuration | `{}` |
| `keycloakConfigCli.existingConfigmap` | ConfigMap with keycloak-config-cli configuration | `""` |
| `keycloakConfigCli.cleanupAfterFinished.enabled` | Enables Cleanup for Finished Jobs | `false` |
| `keycloakConfigCli.cleanupAfterFinished.seconds` | Sets the value of ttlSecondsAfterFinished | `600` |
### Database parameters
| Name | Description | Value |
| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | ------------------ |
| `postgresql.enabled` | Switch to enable or disable the PostgreSQL helm chart | `true` |
| `postgresql.auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided | `""` |
| `postgresql.auth.username` | Name for a custom user to create | `bn_keycloak` |
| `postgresql.auth.password` | Password for the custom user to create | `""` |
| `postgresql.auth.database` | Name for a custom database to create | `bitnami_keycloak` |
| `postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials | `""` |
| `postgresql.auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` |
| `postgresql.architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` |
| `externalDatabase.host` | Database host | `""` |
| `externalDatabase.port` | Database port number | `5432` |
| `externalDatabase.user` | Non-root username for Keycloak | `bn_keycloak` |
| `externalDatabase.password` | Password for the non-root username for Keycloak | `""` |
| `externalDatabase.database` | Keycloak database name | `bitnami_keycloak` |
| `externalDatabase.existingSecret` | Name of an existing secret resource containing the database credentials | `""` |
| `externalDatabase.existingSecretHostKey` | Name of an existing secret key containing the database host name | `""` |
| `externalDatabase.existingSecretPortKey` | Name of an existing secret key containing the database port | `""` |
| `externalDatabase.existingSecretUserKey` | Name of an existing secret key containing the database user | `""` |
| `externalDatabase.existingSecretDatabaseKey` | Name of an existing secret key containing the database name | `""` |
| `externalDatabase.existingSecretPasswordKey` | Name of an existing secret key containing the database credentials | `""` |
| `externalDatabase.annotations` | Additional custom annotations for external database secret object | `{}` |
### Keycloak Cache parameters
| Name | Description | Value |
| ----------------- | -------------------------------------------------------------------------- | ------------ |
| `cache.enabled` | Switch to enable or disable the keycloak distributed cache for kubernetes. | `true` |
| `cache.stackName` | Set infinispan cache stack to use | `kubernetes` |
| `cache.stackFile` | Set infinispan cache stack filename to use | `""` |
### Keycloak Logging parameters
| Name | Description | Value |
| ---------------- | ------------------------------------------------------------------------------ | --------- |
| `logging.output` | Alternates between the default log output format or json format | `default` |
| `logging.level` | Allowed values as documented: FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL, OFF | `INFO` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install my-release --set auth.adminPassword=secretpassword oci://REGISTRY_NAME/REPOSITORY_NAME/keycloak
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
The above command sets the Keycloak administrator password to `secretpassword`.
> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/keycloak
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/keycloak/values.yaml)
Keycloak realms, users and clients can be created from the Keycloak administration panel.
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
### To 24.1.0
With this update the metrics service listening port is switched to 9000, the same as the keycloak management endpoint is using.
This can be changed by setting `metrics.service.ports.http` to a different value, e.g. 8080 like before this change.
### To 23.0.0
This major updates the PostgreSQL subchart to its newest major, 16.0.0, which uses PostgreSQL 17.x. Follow the [official instructions](https://www.postgresql.org/docs/17/upgrading.html) to upgrade to 17.x.
### To 21.0.0
This major release updates the keycloak branch to its newest major, 24.x.x. Follow the [upstream documentation](https://www.keycloak.org/docs/latest/upgrading/index.html#migrating-to-24-0-0) for upgrade instructions.
### To 20.0.0
This major bump changes the following security defaults:
- `runAsGroup` is changed from `0` to `1001`
- `readOnlyRootFilesystem` is set to `true`
- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage, but `resources` adapted to your use case).
- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`.
This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones.
### To 19.0.0
This major release bumps the PostgreSQL chart version to [14.x.x](https://github.com/bitnami/charts/pull/22750); no major issues are expected during the upgrade.
### To 17.0.0
This major updates the PostgreSQL subchart to its newest major, 13.0.0. [Here](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#to-1300) you can find more information about the changes introduced in that version.
### To 15.0.0
This major updates the default serviceType from `LoadBalancer` to `ClusterIP` to avoid inadvertently exposing Keycloak directly to the internet without an Ingress.
### To 12.0.0
This major updates the PostgreSQL subchart to its newest major, 12.0.0. [Here](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#to-1200) you can find more information about the changes introduced in that version.
### To 10.0.0
This major release updates Keycloak to its major version `19`. Please, refer to the official [Keycloak migration documentation](https://www.keycloak.org/docs/latest/upgrading/index.html#migrating-to-19-0-0) for a complete list of changes and further information.
### To 9.0.0
This major release updates Keycloak to its major version `18`. Please, refer to the official [Keycloak migration documentation](https://www.keycloak.org/docs/latest/upgrading/index.html#migrating-to-18-0-0) for a complete list of changes and further information.
### To 8.0.0
This major release updates Keycloak to its major version `17`. Among other features, this new version has deprecated WildFly in favor of Quarkus, which introduces breaking changes like:
- Removal of `/auth` from the default context path.
- Changes in the configuration and deployment of custom providers.
- Significant changes in configuring Keycloak.
Please, refer to the official [Keycloak migration documentation](https://www.keycloak.org/docs/latest/upgrading/index.html#migrating-to-17-0-0) and [Migrating to Quarkus distribution document](https://www.keycloak.org/migration/migrating-to-quarkus) for a complete list of changes and further information.
### To 7.0.0
This major release updates the PostgreSQL subchart to its newest major *11.x.x*, which contain several changes in the supported values (check the [upgrade notes](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#to-1100) to obtain more information).
#### Upgrading Instructions
To upgrade to *7.0.0* from *6.x*, it should be done reusing the PVC(s) used to hold the data on your previous release. To do so, follow the instructions below (the following example assumes that the release name is *keycloak* and the release namespace *default*):
1. Obtain the credentials and the names of the PVCs used to hold the data on your current release:
```console
export KEYCLOAK_PASSWORD=$(kubectl get secret --namespace default keycloak -o jsonpath="{.data.admin-password}" | base64 --decode)
export POSTGRESQL_PASSWORD=$(kubectl get secret --namespace default keycloak-postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode)
export POSTGRESQL_PVC=$(kubectl get pvc -l app.kubernetes.io/instance=keycloak,app.kubernetes.io/name=postgresql,role=primary -o jsonpath="{.items[0].metadata.name}")
```
1. Delete the PostgreSQL statefulset (notice the option *--cascade=false*) and secret:
```console
kubectl delete statefulsets.apps --cascade=false keycloak-postgresql
kubectl delete secret keycloak-postgresql --namespace default
```
1. Upgrade your release using the same PostgreSQL version:
```console
CURRENT_PG_VERSION=$(kubectl exec keycloak-postgresql-0 -- bash -c 'echo $BITNAMI_IMAGE_VERSION')
helm upgrade keycloak bitnami/keycloak \
--set auth.adminPassword=$KEYCLOAK_PASSWORD \
--set postgresql.image.tag=$CURRENT_PG_VERSION \
--set postgresql.auth.password=$POSTGRESQL_PASSWORD \
--set postgresql.persistence.existingClaim=$POSTGRESQL_PVC
```
1. Delete the existing PostgreSQL pods and the new statefulset will create a new one:
```console
kubectl delete pod keycloak-postgresql-0
```
### To 1.0.0
[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
#### What changes were introduced in this major version?
- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
- Move dependency information from the *requirements.yaml* to the *Chart.yaml*
- After running *helm dependency update*, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*
- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
#### Considerations when upgrading to this version
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version does not support Helm v2 anymore.
- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
#### Useful links
- [Bitnami Tutorial](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-resolve-helm2-helm3-post-migration-issues-index.html)
- [Helm docs](https://helm.sh/docs/topics/v2_v3_migration)
- [Helm Blog](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3)
## License
Copyright &copy; 2024 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,104 +0,0 @@
{{- /* Post-install notes: rendered by Helm after install/upgrade and printed to the console. */ -}}
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
** Please be patient while the chart is being deployed **
Keycloak can be accessed through the following DNS name from within your cluster:
{{ include "common.names.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} (port {{ coalesce .Values.service.ports.http .Values.service.port }})
To access Keycloak from outside the cluster execute the following commands:
{{- /* Ingress-based access: print the URL(s) and an /etc/hosts association hint. */}}
{{- if .Values.ingress.enabled }}
1. Get the Keycloak URL and associate its hostname to your cluster external IP:
export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters
echo "Keycloak URL: http{{ if .Values.ingress.tls }}s{{ end }}://{{ (tpl .Values.ingress.hostname .) }}/"
echo "$CLUSTER_IP {{ (tpl .Values.ingress.hostname .) }}" | sudo tee -a /etc/hosts
{{- if .Values.adminIngress.enabled }}
The admin area of Keycloak has been configured to point to a different domain ({{ .Values.adminIngress.hostname }}). Please remember to update the `frontendUrl` property of the `{{ .Values.adminRealm | default "master" }}` (or any other) realm for it to work properly (see README for an example) :
echo "Keycloak admin URL: http{{ if .Values.adminIngress.tls }}s{{ end }}://{{ (tpl .Values.adminIngress.hostname .) }}/"
echo "$CLUSTER_IP {{ (tpl .Values.adminIngress.hostname .) }}" | sudo tee -a /etc/hosts
{{- end }}
{{- else }}
{{- /* Service-based access: instructions branch on the service type (NodePort / LoadBalancer / ClusterIP). */}}
1. Get the Keycloak URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export HTTP_NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[?(@.name=='http')].nodePort}" services {{ include "common.names.fullname" . }})
{{- if .Values.tls.enabled }}
export HTTPS_NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[?(@.name=='https')].nodePort}" services {{ include "common.names.fullname" . }})
{{- end }}
export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo "http://${NODE_IP}:${HTTP_NODE_PORT}/"
{{- if .Values.tls.enabled }}
echo "https://${NODE_IP}:${HTTPS_NODE_PORT}/"
{{- end }}
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ include "common.names.namespace" . }} svc -w {{ include "common.names.fullname" . }}'
export HTTP_SERVICE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[?(@.name=='http')].port}" services {{ include "common.names.fullname" . }})
{{- if .Values.tls.enabled }}
export HTTPS_SERVICE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[?(@.name=='https')].port}" services {{ include "common.names.fullname" . }})
{{- end }}
export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ include "common.names.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo "http://${SERVICE_IP}:${HTTP_SERVICE_PORT}/"
{{- if .Values.tls.enabled }}
echo "https://${SERVICE_IP}:${HTTPS_SERVICE_PORT}/"
{{- end }}
{{- else if contains "ClusterIP" .Values.service.type }}
export HTTP_SERVICE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[?(@.name=='http')].port}" services {{ include "common.names.fullname" . }})
{{- if .Values.tls.enabled }}
export HTTPS_SERVICE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[?(@.name=='https')].port}" services {{ include "common.names.fullname" . }})
kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ include "common.names.fullname" . }} ${HTTP_SERVICE_PORT}:${HTTP_SERVICE_PORT} ${HTTPS_SERVICE_PORT}:${HTTPS_SERVICE_PORT} &
{{- else }}
kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ include "common.names.fullname" . }} ${HTTP_SERVICE_PORT}:${HTTP_SERVICE_PORT} &
{{- end }}
echo "http://127.0.0.1:${HTTP_SERVICE_PORT}/"
{{- if .Values.tls.enabled }}
echo "https://127.0.0.1:${HTTPS_SERVICE_PORT}/"
{{- end }}
{{- end }}
{{- end }}
2. Access Keycloak using the obtained URL.
{{- /* Admin credentials hint is only shown when both user and password are set in values (not when an existing secret is used). */}}
{{- if and .Values.auth.adminUser .Values.auth.adminPassword }}
3. Access the Administration Console using the following credentials:
echo Username: {{ .Values.auth.adminUser }}
echo Password: $(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ include "keycloak.secretName" . }} -o jsonpath="{.data.{{ include "keycloak.secretKey" .}}}" | base64 -d)
{{- end }}
{{- if .Values.metrics.enabled }}
You can access the Prometheus metrics following the steps below:
1. Get the Keycloak Prometheus metrics URL by running:
{{- $metricsPort := coalesce .Values.metrics.service.ports.metrics .Values.metrics.service.port | toString }}
kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ printf "%s-metrics" (include "common.names.fullname" .) }} {{ $metricsPort }}:{{ $metricsPort }} &
echo "Keycloak Prometheus metrics URL: http://127.0.0.1:{{ $metricsPort }}/metrics"
2. Open a browser and access Keycloak Prometheus metrics using the obtained URL.
{{- end }}
{{- /* Fail the release on invalid values, then emit standard Bitnami warnings (rolling tags, resources, modified images). */}}
{{- include "keycloak.validateValues" . }}
{{- include "common.warnings.rollingTag" .Values.image }}
{{- include "common.warnings.rollingTag" .Values.keycloakConfigCli.image }}
{{- include "common.warnings.resources" (dict "sections" (list "keycloakConfigCli" "") "context" $) }}
{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.keycloakConfigCli.image) "context" $) }}

View File

@ -1,348 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /* Named template helpers (_helpers.tpl): image names, configmap names, service account name and HTTP path. */}}
{{/*
Return the proper Keycloak image name
*/}}
{{- define "keycloak.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper keycloak-config-cli image name
*/}}
{{- define "keycloak.keycloakConfigCli.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.keycloakConfigCli.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the keycloak-config-cli configuration configmap.
A user-supplied existingConfigmap (tpl-rendered) wins over the generated default name.
*/}}
{{- define "keycloak.keycloakConfigCli.configmapName" -}}
{{- if .Values.keycloakConfigCli.existingConfigmap -}}
{{- printf "%s" (tpl .Values.keycloakConfigCli.existingConfigmap $) -}}
{{- else -}}
{{- printf "%s-keycloak-config-cli-configmap" (include "common.names.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a configmap object should be created for keycloak-config-cli
(enabled, inline configuration given, and no existing configmap supplied).
*/}}
{{- define "keycloak.keycloakConfigCli.createConfigmap" -}}
{{- if and .Values.keycloakConfigCli.enabled .Values.keycloakConfigCli.configuration (not .Values.keycloakConfigCli.existingConfigmap) -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names
*/}}
{{- define "keycloak.imagePullSecrets" -}}
{{- include "common.images.renderPullSecrets" (dict "images" (list .Values.image .Values.keycloakConfigCli.image) "context" $) -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "keycloak.postgresql.fullname" -}}
{{- include "common.names.dependency.fullname" (dict "chartName" "postgresql" "chartValues" .Values.postgresql "context" $) -}}
{{- end -}}
{{/*
Create the name of the service account to use.
When serviceAccount.create is true, default to the chart fullname; otherwise to "default".
*/}}
{{- define "keycloak.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the path Keycloak is hosted on. This looks at httpRelativePath and returns it with a trailing slash. For example:
/ -> / (the default httpRelativePath)
/auth -> /auth/ (trailing slash added)
/custom/ -> /custom/ (unchanged)
*/}}
{{- define "keycloak.httpPath" -}}
{{ ternary .Values.httpRelativePath (printf "%s%s" .Values.httpRelativePath "/") (hasSuffix "/" .Values.httpRelativePath) }}
{{- end -}}
{{/*
Return the Keycloak configuration configmap.
A user-supplied existingConfigmap (tpl-rendered) wins over the generated default name.
*/}}
{{- define "keycloak.configmapName" -}}
{{- if .Values.existingConfigmap -}}
{{- printf "%s" (tpl .Values.existingConfigmap $) -}}
{{- else -}}
{{- printf "%s-configuration" (include "common.names.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a configmap object should be created
(inline configuration given and no existing configmap supplied).
*/}}
{{- define "keycloak.createConfigmap" -}}
{{- if and .Values.configuration (not .Values.existingConfigmap) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the Database hostname: the PostgreSQL subchart service when postgresql.enabled,
otherwise the tpl-rendered externalDatabase.host.
*/}}
{{- define "keycloak.databaseHost" -}}
{{- if eq .Values.postgresql.architecture "replication" }}
{{- /* NOTE(review): the "-primary" suffix is appended to whichever host the ternary yields, including an
external database host, whenever postgresql.architecture is "replication" — confirm this is intended. */}}
{{- ternary (include "keycloak.postgresql.fullname" .) (tpl .Values.externalDatabase.host $) .Values.postgresql.enabled -}}-primary
{{- else -}}
{{- ternary (include "keycloak.postgresql.fullname" .) (tpl .Values.externalDatabase.host $) .Values.postgresql.enabled -}}
{{- end -}}
{{- end -}}
{{/*
Return the Database port (quoted): fixed 5432 for the subchart, externalDatabase.port otherwise.
*/}}
{{- define "keycloak.databasePort" -}}
{{- ternary "5432" .Values.externalDatabase.port .Values.postgresql.enabled | quote -}}
{{- end -}}
{{/*
Return the Database database name.
Subchart case: global.postgresql.auth.database takes precedence over postgresql.auth.database.
*/}}
{{- define "keycloak.databaseName" -}}
{{- if .Values.postgresql.enabled }}
{{- if .Values.global.postgresql }}
{{- if .Values.global.postgresql.auth }}
{{- coalesce .Values.global.postgresql.auth.database .Values.postgresql.auth.database -}}
{{- else -}}
{{- .Values.postgresql.auth.database -}}
{{- end -}}
{{- else -}}
{{- .Values.postgresql.auth.database -}}
{{- end -}}
{{- else -}}
{{- .Values.externalDatabase.database -}}
{{- end -}}
{{- end -}}
{{/*
Return the Database user.
Subchart case: global.postgresql.auth.username takes precedence over postgresql.auth.username.
*/}}
{{- define "keycloak.databaseUser" -}}
{{- if .Values.postgresql.enabled -}}
{{- if .Values.global.postgresql -}}
{{- if .Values.global.postgresql.auth -}}
{{- coalesce .Values.global.postgresql.auth.username .Values.postgresql.auth.username -}}
{{- else -}}
{{- .Values.postgresql.auth.username -}}
{{- end -}}
{{- else -}}
{{- .Values.postgresql.auth.username -}}
{{- end -}}
{{- else -}}
{{- .Values.externalDatabase.user -}}
{{- end -}}
{{- end -}}
{{/*
Return the name of the secret that stores the database password.
Resolution order for the subchart: global existing secret > chart existing secret > generated
PostgreSQL subchart secret. For an external database: externalDatabase.existingSecret if set,
otherwise "<release>-externaldb".
*/}}
{{- define "keycloak.databaseSecretName" -}}
{{- if .Values.postgresql.enabled -}}
{{- if .Values.global.postgresql -}}
{{- if .Values.global.postgresql.auth -}}
{{- if .Values.global.postgresql.auth.existingSecret -}}
{{- tpl .Values.global.postgresql.auth.existingSecret $ -}}
{{- else -}}
{{- default (include "keycloak.postgresql.fullname" .) (tpl .Values.postgresql.auth.existingSecret $) -}}
{{- end -}}
{{- else -}}
{{- default (include "keycloak.postgresql.fullname" .) (tpl .Values.postgresql.auth.existingSecret $) -}}
{{- end -}}
{{- else -}}
{{- default (include "keycloak.postgresql.fullname" .) (tpl .Values.postgresql.auth.existingSecret $) -}}
{{- end -}}
{{- else -}}
{{- default (printf "%s-externaldb" .Release.Name) (tpl .Values.externalDatabase.existingSecret $) -}}
{{- end -}}
{{- end -}}
{{/*
Return the key within the database secret that stores the database password.
Subchart case: postgresql.auth.secretKeys.userPasswordKey (default "password");
external case: externalDatabase.existingSecretPasswordKey (default "db-password").
*/}}
{{- define "keycloak.databaseSecretPasswordKey" -}}
{{- if .Values.postgresql.enabled -}}
{{- printf "%s" (.Values.postgresql.auth.secretKeys.userPasswordKey | default "password") -}}
{{- else -}}
{{- if .Values.externalDatabase.existingSecret -}}
{{- if .Values.externalDatabase.existingSecretPasswordKey -}}
{{- printf "%s" .Values.externalDatabase.existingSecretPasswordKey -}}
{{- else -}}
{{- print "db-password" -}}
{{- end -}}
{{- else -}}
{{- print "db-password" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return the key within the external database secret that stores the host (default "db-host").
*/}}
{{- define "keycloak.databaseSecretHostKey" -}}
{{- if .Values.externalDatabase.existingSecretHostKey -}}
{{- printf "%s" .Values.externalDatabase.existingSecretHostKey -}}
{{- else -}}
{{- print "db-host" -}}
{{- end -}}
{{- end -}}
{{/*
Return the key within the external database secret that stores the port (default "db-port").
*/}}
{{- define "keycloak.databaseSecretPortKey" -}}
{{- if .Values.externalDatabase.existingSecretPortKey -}}
{{- printf "%s" .Values.externalDatabase.existingSecretPortKey -}}
{{- else -}}
{{- print "db-port" -}}
{{- end -}}
{{- end -}}
{{/*
Return the key within the external database secret that stores the user (default "db-user").
*/}}
{{- define "keycloak.databaseSecretUserKey" -}}
{{- if .Values.externalDatabase.existingSecretUserKey -}}
{{- printf "%s" .Values.externalDatabase.existingSecretUserKey -}}
{{- else -}}
{{- print "db-user" -}}
{{- end -}}
{{- end -}}
{{/*
Return the key within the external database secret that stores the database name (default "db-database").
*/}}
{{- define "keycloak.databaseSecretDatabaseKey" -}}
{{- if .Values.externalDatabase.existingSecretDatabaseKey -}}
{{- printf "%s" .Values.externalDatabase.existingSecretDatabaseKey -}}
{{- else -}}
{{- print "db-database" -}}
{{- end -}}
{{- end -}}
{{/*
Return the Keycloak initdb scripts configmap: a user-supplied configmap name wins over the generated default.
*/}}
{{- define "keycloak.initdbScriptsCM" -}}
{{- if .Values.initdbScriptsConfigMap -}}
{{- printf "%s" .Values.initdbScriptsConfigMap -}}
{{- else -}}
{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the secret containing the Keycloak admin password.
auth.existingSecret (tpl-rendered) wins; otherwise the chart fullname, truncated to the 63-char DNS limit.
*/}}
{{- define "keycloak.secretName" -}}
{{- $secretName := .Values.auth.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Return the secret key that contains the Keycloak admin password.
auth.passwordSecretKey only applies together with an existing secret; default is "admin-password".
*/}}
{{- define "keycloak.secretKey" -}}
{{- $secretName := .Values.auth.existingSecret -}}
{{- if and $secretName .Values.auth.passwordSecretKey -}}
{{- printf "%s" .Values.auth.passwordSecretKey -}}
{{- else -}}
{{- print "admin-password" -}}
{{- end -}}
{{- end -}}
{{/*
Return the secret containing Keycloak HTTPS/TLS certificates.
tls.existingSecret (tpl-rendered) wins over the generated "<fullname>-crt" default.
*/}}
{{- define "keycloak.tlsSecretName" -}}
{{- $secretName := .Values.tls.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "common.names.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the secret containing Keycloak HTTPS/TLS keystore and truststore passwords.
tls.passwordsSecret (tpl-rendered) wins over the generated "<fullname>-tls-passwords" default.
*/}}
{{- define "keycloak.tlsPasswordsSecretName" -}}
{{- $secretName := .Values.tls.passwordsSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-tls-passwords" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Return the secret containing Keycloak SPI TLS certificates.
spi.passwordsSecret (tpl-rendered) wins over the generated "<fullname>-spi-passwords" default.
*/}}
{{- define "keycloak.spiPasswordsSecretName" -}}
{{- $secretName := .Values.spi.passwordsSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-spi-passwords" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a TLS secret object should be created
(TLS enabled, auto-generation requested, and no existing secret supplied).
*/}}
{{- define "keycloak.createTlsSecret" -}}
{{- if and .Values.tls.enabled .Values.tls.autoGenerated (not .Values.tls.existingSecret) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Compile all warnings into a single message.
Each validator returns "" on success; any non-empty messages are joined and the render is aborted via fail.
*/}}
{{- define "keycloak.validateValues" -}}
{{- $messages := list -}}
{{- $messages := append $messages (include "keycloak.validateValues.database" .) -}}
{{- $messages := append $messages (include "keycloak.validateValues.tls" .) -}}
{{- $messages := append $messages (include "keycloak.validateValues.production" .) -}}
{{- $messages := without $messages "" -}}
{{- $message := join "\n" $messages -}}
{{- if $message -}}
{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
{{- end -}}
{{- end -}}
{{/* Validate values of Keycloak - database: with the subchart disabled, an external host plus password or secret is required */}}
{{- define "keycloak.validateValues.database" -}}
{{- if and (not .Values.postgresql.enabled) (not .Values.externalDatabase.host) (and (not .Values.externalDatabase.password) (not .Values.externalDatabase.existingSecret)) -}}
keycloak: database
    You disabled the PostgreSQL sub-chart but did not specify an external PostgreSQL host.
    Either deploy the PostgreSQL sub-chart (--set postgresql.enabled=true),
    or set a value for the external database host (--set externalDatabase.host=FOO)
    and set a value for the external database password (--set externalDatabase.password=BAR)
    or existing secret (--set externalDatabase.existingSecret=BAR).
{{- end -}}
{{- end -}}
{{/* Validate values of Keycloak - TLS enabled: requires either an existing secret or auto-generated certificates */}}
{{- define "keycloak.validateValues.tls" -}}
{{- if and .Values.tls.enabled (not .Values.tls.autoGenerated) (not .Values.tls.existingSecret) }}
keycloak: tls.enabled
    In order to enable TLS, you also need to provide
    an existing secret containing the Keystore and Truststore or
    enable auto-generated certificates.
{{- end -}}
{{- end -}}
{{/* Validate values of Keycloak - Production mode enabled: requires TLS, edge proxy, or explicit proxy headers */}}
{{- define "keycloak.validateValues.production" -}}
{{- if and .Values.production (not .Values.tls.enabled) (not (eq .Values.proxy "edge")) (empty .Values.proxyHeaders) -}}
keycloak: production
    In order to enable Production mode, you also need to enable HTTPS/TLS
    using the value 'tls.enabled' and providing an existing secret containing the Keystore and Trustore.
{{- end -}}
{{- end -}}

View File

@ -1,61 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /* Dedicated Ingress exposing the Keycloak admin console on its own hostname; rendered only when adminIngress.enabled. */}}
{{- if .Values.adminIngress.enabled }}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
  name: {{ include "common.names.fullname" . }}-admin
  namespace: {{ include "common.names.namespace" . | quote }}
  {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.adminIngress.labels .Values.commonLabels ) "context" . ) }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak
  {{- if or .Values.adminIngress.annotations .Values.commonAnnotations }}
  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.adminIngress.annotations .Values.commonAnnotations ) "context" . ) }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
  {{- end }}
spec:
  {{- if and .Values.adminIngress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
  ingressClassName: {{ .Values.adminIngress.ingressClassName | quote }}
  {{- end }}
  rules:
    {{- /* Primary rule for adminIngress.hostname, with optional user-supplied extraPaths rendered first. */}}
    {{- if .Values.adminIngress.hostname }}
    - host: {{ (tpl .Values.adminIngress.hostname .) | quote }}
      http:
        paths:
          {{- if .Values.adminIngress.extraPaths }}
          {{- toYaml .Values.adminIngress.extraPaths | nindent 10 }}
          {{- end }}
          - path: {{ include "common.tplvalues.render" ( dict "value" .Values.adminIngress.path "context" $) }}
            {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
            pathType: {{ .Values.adminIngress.pathType }}
            {{- end }}
            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" .Values.adminIngress.servicePort "context" $) | nindent 14 }}
    {{- end }}
    {{- /* One additional rule per extraHosts entry, all pointing at the same service/port. */}}
    {{- range .Values.adminIngress.extraHosts }}
    - host: {{ (tpl .name $) }}
      http:
        paths:
          - path: {{ default "/" .path }}
            {{- if eq "true" (include "common.ingress.supportsPathType" $) }}
            pathType: {{ default "ImplementationSpecific" .pathType }}
            {{- end }}
            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" $.Values.adminIngress.servicePort "context" $) | nindent 14 }}
    {{- end }}
    {{- if .Values.adminIngress.extraRules }}
    {{- include "common.tplvalues.render" (dict "value" .Values.adminIngress.extraRules "context" $) | nindent 4 }}
    {{- end }}
  {{- /* TLS section: emitted when cert-manager annotations, user-provided secrets, or self-signed certs apply; extraTls is appended verbatim. */}}
  {{- if or (and .Values.adminIngress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.adminIngress.annotations )) .Values.adminIngress.selfSigned .Values.adminIngress.secrets )) .Values.adminIngress.extraTls }}
  tls:
    {{- if and .Values.adminIngress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.adminIngress.annotations )) .Values.adminIngress.secrets .Values.adminIngress.selfSigned) }}
    - hosts:
        - {{ (tpl .Values.adminIngress.hostname .) | quote }}
      secretName: {{ printf "%s-tls" (tpl .Values.adminIngress.hostname .) }}
    {{- end }}
    {{- if .Values.adminIngress.extraTls }}
    {{- include "common.tplvalues.render" (dict "value" .Values.adminIngress.extraTls "context" $) | nindent 4 }}
    {{- end }}
  {{- end }}
{{- end }}

View File

@ -1,106 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /* ConfigMap of non-secret KEYCLOAK_* environment variables consumed by the Keycloak pods. */}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-env-vars" (include "common.names.fullname" .) }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
data:
  KEYCLOAK_ADMIN: {{ .Values.auth.adminUser | quote }}
  KEYCLOAK_HTTP_PORT: {{ .Values.containerPorts.http | quote }}
  {{- /* Legacy .Values.proxy maps to proxy headers: empty for "passthrough", "xforwarded" otherwise; an explicit proxyHeaders value wins. */}}
  {{- if and .Values.proxy (empty .Values.proxyHeaders) }}
  KEYCLOAK_PROXY_HEADERS: {{ ternary "" "xforwarded" (eq .Values.proxy "passthrough") }}
  {{- else }}
  KEYCLOAK_PROXY_HEADERS: {{ .Values.proxyHeaders | quote }}
  {{- end }}
  {{- /* Admin console URL built from the admin ingress; the "gce" controller needs any trailing "*" stripped from the path. */}}
  {{- if and .Values.adminIngress.enabled .Values.adminIngress.hostname }}
  KEYCLOAK_HOSTNAME_ADMIN: |-
    {{ ternary "https://" "http://" ( or .Values.adminIngress.tls (eq .Values.proxy "edge") (not (empty .Values.proxyHeaders)) ) -}}
    {{- include "common.tplvalues.render" (dict "value" .Values.adminIngress.hostname "context" $) -}}
    {{- if eq .Values.adminIngress.controller "default" }}
    {{- include "common.tplvalues.render" (dict "value" .Values.adminIngress.path "context" $) }}
    {{- else if eq .Values.adminIngress.controller "gce" }}
    {{- $path := .Values.adminIngress.path -}}
    {{- if hasSuffix "*" $path -}}
    {{- $path = trimSuffix "*" $path -}}
    {{- end -}}
    {{- include "common.tplvalues.render" (dict "value" $path "context" $) }}
    {{- end }}
  {{- end }}
  {{- /* Public URL built the same way from the main ingress. */}}
  {{- if and .Values.ingress.enabled .Values.ingress.hostname }}
  KEYCLOAK_HOSTNAME: |-
    {{ ternary "https://" "http://" ( or .Values.ingress.tls (eq .Values.proxy "edge") (not (empty .Values.proxyHeaders)) ) -}}
    {{- include "common.tplvalues.render" (dict "value" .Values.ingress.hostname "context" $) -}}
    {{- if eq .Values.ingress.controller "default" }}
    {{- include "common.tplvalues.render" (dict "value" .Values.ingress.path "context" $) }}
    {{- else if eq .Values.ingress.controller "gce" }}
    {{- $path := .Values.ingress.path -}}
    {{- if hasSuffix "*" $path -}}
    {{- $path = trimSuffix "*" $path -}}
    {{- end -}}
    {{- include "common.tplvalues.render" (dict "value" $path "context" $) }}
    {{- end }}
  {{- end }}
  {{- if .Values.ingress.enabled }}
  KEYCLOAK_HOSTNAME_STRICT: {{ ternary "true" "false" .Values.ingress.hostnameStrict | quote }}
  {{- end }}
  KEYCLOAK_ENABLE_STATISTICS: {{ ternary "true" "false" .Values.metrics.enabled | quote }}
  {{- /* Database coordinates are only inlined here when not sourced from an existing secret key. */}}
  {{- if not .Values.externalDatabase.existingSecretHostKey }}
  KEYCLOAK_DATABASE_HOST: {{ include "keycloak.databaseHost" . | quote }}
  {{- end }}
  {{- if not .Values.externalDatabase.existingSecretPortKey }}
  KEYCLOAK_DATABASE_PORT: {{ include "keycloak.databasePort" . }}
  {{- end }}
  {{- if not .Values.externalDatabase.existingSecretDatabaseKey }}
  KEYCLOAK_DATABASE_NAME: {{ include "keycloak.databaseName" . | quote }}
  {{- end }}
  {{- if not .Values.externalDatabase.existingSecretUserKey }}
  KEYCLOAK_DATABASE_USER: {{ include "keycloak.databaseUser" . | quote }}
  {{- end }}
  KEYCLOAK_PRODUCTION: {{ ternary "true" "false" .Values.production | quote }}
  KEYCLOAK_ENABLE_HTTPS: {{ ternary "true" "false" .Values.tls.enabled | quote }}
  {{- if .Values.customCaExistingSecret }}
  KC_TRUSTSTORE_PATHS: "/opt/bitnami/keycloak/custom-ca"
  {{- end }}
  {{- /* TLS material: PEM cert/key files when usePem or autoGenerated, JKS keystore/truststore otherwise. */}}
  {{- if .Values.tls.enabled }}
  KEYCLOAK_HTTPS_PORT: {{ .Values.containerPorts.https | quote }}
  KEYCLOAK_HTTPS_USE_PEM: {{ ternary "true" "false" (or .Values.tls.usePem .Values.tls.autoGenerated) | quote }}
  {{- if or .Values.tls.usePem .Values.tls.autoGenerated }}
  KEYCLOAK_HTTPS_CERTIFICATE_FILE: "/opt/bitnami/keycloak/certs/tls.crt"
  KEYCLOAK_HTTPS_CERTIFICATE_KEY_FILE: "/opt/bitnami/keycloak/certs/tls.key"
  {{- else }}
  KEYCLOAK_HTTPS_KEY_STORE_FILE: {{ printf "/opt/bitnami/keycloak/certs/%s" .Values.tls.keystoreFilename | quote }}
  KEYCLOAK_HTTPS_TRUST_STORE_FILE: {{ printf "/opt/bitnami/keycloak/certs/%s" .Values.tls.truststoreFilename | quote }}
  {{- end }}
  {{- end }}
  {{- if .Values.spi.existingSecret }}
  {{- if .Values.spi.hostnameVerificationPolicy }}
  KEYCLOAK_SPI_TRUSTSTORE_FILE_HOSTNAME_VERIFICATION_POLICY: {{ .Values.spi.hostnameVerificationPolicy | quote }}
  {{- end }}
  KEYCLOAK_SPI_TRUSTSTORE_FILE: {{ printf "/opt/bitnami/keycloak/spi-certs/%s" .Values.spi.truststoreFilename }}
  {{- end }}
  {{- /* Clustering: Infinispan ("ispn") cache with JGroups peer discovery via a DNS query against the headless service; "local" otherwise. */}}
  {{- if .Values.cache.enabled }}
  KEYCLOAK_CACHE_TYPE: "ispn"
  {{- if .Values.cache.stackName }}
  KEYCLOAK_CACHE_STACK: {{ .Values.cache.stackName | quote }}
  {{- end }}
  {{- if .Values.cache.stackFile }}
  KEYCLOAK_CACHE_CONFIG_FILE: {{ .Values.cache.stackFile | quote }}
  {{- end }}
  JAVA_OPTS_APPEND: {{ printf "-Djgroups.dns.query=%s-headless.%s.svc.%s" (include "common.names.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain | quote }}
  {{- else }}
  KEYCLOAK_CACHE_TYPE: "local"
  {{- end }}
  {{- if .Values.logging }}
  KEYCLOAK_LOG_OUTPUT: {{ .Values.logging.output | quote }}
  KEYCLOAK_LOG_LEVEL: {{ .Values.logging.level | quote }}
  {{- end }}

View File

@ -1,20 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /* ConfigMap holding the inline keycloak.conf; rendered only when configuration is given and no existing configmap is supplied (see keycloak.createConfigmap). */}}
{{- if (include "keycloak.createConfigmap" .) }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-configuration" (include "common.names.fullname" .) }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
data:
  keycloak.conf: |-
  {{- .Values.configuration | nindent 4 }}
{{- end }}

View File

@ -1,9 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /* Escape hatch: each entry of .Values.extraDeploy is tpl-rendered as its own YAML document ("---" separated). */}}
{{- range .Values.extraDeploy }}
---
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
{{- end }}

View File

@ -1,40 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /* Headless service (clusterIP: None) backing the StatefulSet; also the target of the JGroups DNS discovery query. */}}
apiVersion: v1
kind: Service
metadata:
  name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak
  {{- if or .Values.commonAnnotations .Values.service.headless.annotations }}
  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.commonAnnotations ) "context" . ) }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
  {{- end }}
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: http
      port: {{ .Values.containerPorts.http }}
      protocol: TCP
      targetPort: http
    {{- if .Values.tls.enabled }}
    - name: https
      port: {{ .Values.containerPorts.https }}
      protocol: TCP
      targetPort: https
    {{- end }}
    {{- if .Values.service.extraHeadlessPorts }}
    {{- include "common.tplvalues.render" (dict "value" .Values.service.extraHeadlessPorts "context" $) | nindent 4 }}
    {{- end }}
    {{- if .Values.service.headless.extraPorts }}
    {{- include "common.tplvalues.render" (dict "value" .Values.service.headless.extraPorts "context" $) | nindent 4 }}
    {{- end }}
  {{- /* Resolve pods before they are Ready so cluster members can discover each other during startup. */}}
  publishNotReadyAddresses: true
  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
  selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak

View File

@ -1,66 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /* HorizontalPodAutoscaler targeting the Keycloak StatefulSet; rendered only when autoscaling.enabled. */}}
{{- if .Values.autoscaling.enabled }}
apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }}
kind: HorizontalPodAutoscaler
metadata:
  name: {{ template "common.names.fullname" . }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
spec:
  scaleTargetRef:
    apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
    kind: StatefulSet
    name: {{ template "common.names.fullname" . }}
  minReplicas: {{ .Values.autoscaling.minReplicas }}
  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
  metrics:
    {{- /* Metric syntax differs by cluster version: flat targetAverageUtilization before 1.23, nested target block afterwards. */}}
    {{- if .Values.autoscaling.targetCPU }}
    - type: Resource
      resource:
        name: cpu
        {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
        targetAverageUtilization: {{ .Values.autoscaling.targetCPU }}
        {{- else }}
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetCPU }}
        {{- end }}
    {{- end }}
    {{- if .Values.autoscaling.targetMemory }}
    - type: Resource
      resource:
        name: memory
        {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
        targetAverageUtilization: {{ .Values.autoscaling.targetMemory }}
        {{- else }}
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetMemory }}
        {{- end }}
    {{- end }}
  {{- /* Optional scaling behavior: each direction is emitted only when it has at least one policy configured. */}}
  {{- if or .Values.autoscaling.behavior.scaleDown.policies .Values.autoscaling.behavior.scaleUp.policies }}
  behavior:
    {{- if .Values.autoscaling.behavior.scaleDown.policies }}
    scaleDown:
      stabilizationWindowSeconds: {{ .Values.autoscaling.behavior.scaleDown.stabilizationWindowSeconds }}
      selectPolicy: {{ .Values.autoscaling.behavior.scaleDown.selectPolicy }}
      policies:
      {{- toYaml .Values.autoscaling.behavior.scaleDown.policies | nindent 8 }}
    {{- end }}
    {{- if .Values.autoscaling.behavior.scaleUp.policies }}
    scaleUp:
      stabilizationWindowSeconds: {{ .Values.autoscaling.behavior.scaleUp.stabilizationWindowSeconds }}
      selectPolicy: {{ .Values.autoscaling.behavior.scaleUp.selectPolicy }}
      policies:
      {{- toYaml .Values.autoscaling.behavior.scaleUp.policies | nindent 8 }}
    {{- end }}
  {{- end }}
{{- end }}

View File

@ -1,61 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
Keycloak Ingress, rendered only when .Values.ingress.enabled.
Exposes .Values.ingress.hostname (templated) plus any extraHosts/extraPaths/
extraRules, and emits a tls section when TLS is requested via cert-manager
annotations, ingress.secrets, ingress.selfSigned, or extraTls.
*/}}
{{- if .Values.ingress.enabled }}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ include "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
{{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingress.labels .Values.commonLabels ) "context" . ) }}
labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if or .Values.ingress.annotations .Values.commonAnnotations }}
{{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingress.annotations .Values.commonAnnotations ) "context" . ) }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
ingressClassName: {{ .Values.ingress.ingressClassName | quote }}
{{- end }}
rules:
{{- if .Values.ingress.hostname }}
- host: {{ (tpl .Values.ingress.hostname .) | quote }}
http:
paths:
{{- if .Values.ingress.extraPaths }}
{{- toYaml .Values.ingress.extraPaths | nindent 10 }}
{{- end }}
- path: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.path "context" $) }}
{{- if eq "true" (include "common.ingress.supportsPathType" .) }}
pathType: {{ .Values.ingress.pathType }}
{{- end }}
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" .Values.ingress.servicePort "context" $) | nindent 14 }}
{{- end }}
{{- range .Values.ingress.extraHosts }}
- host: {{ (tpl .name $) }}
http:
paths:
- path: {{ default "/" .path }}
{{- if eq "true" (include "common.ingress.supportsPathType" $) }}
pathType: {{ default "ImplementationSpecific" .pathType }}
{{- end }}
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" $.Values.ingress.servicePort "context" $) | nindent 14 }}
{{- end }}
{{- if .Values.ingress.extraRules }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }}
{{- end }}
{{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned .Values.ingress.secrets )) .Values.ingress.extraTls }}
tls:
{{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.secrets .Values.ingress.selfSigned) }}
- hosts:
- {{ (tpl .Values.ingress.hostname .) | quote }}
secretName: {{ printf "%s-tls" (tpl .Values.ingress.hostname .) }}
{{- end }}
{{- if .Values.ingress.extraTls }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,19 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
ConfigMap holding the user-provided initdb scripts. Rendered only when
.Values.initdbScripts is set and no pre-existing ConfigMap is referenced via
.Values.initdbScriptsConfigMap (an existing ConfigMap takes precedence).
*/}}
{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data:
{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }}
{{- end }}

View File

@ -1,23 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
ConfigMap with the keycloak-config-cli import files, rendered when the
keycloak.keycloakConfigCli.createConfigmap helper says one is needed.
Each entry of .Values.keycloakConfigCli.configuration with inline content is
rendered as a template; entries without content are loaded from chart files
via $.Files.Glob instead.
*/}}
{{- if (include "keycloak.keycloakConfigCli.createConfigmap" .) }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "keycloak.keycloakConfigCli.configmapName" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak-config-cli
data:
{{- range $fileName, $fileContent := .Values.keycloakConfigCli.configuration }}
{{- if $fileContent }}
{{ $fileName }}: |
{{- include "common.tplvalues.render" (dict "value" $fileContent "context" $) | nindent 4 }}
{{- else }}
{{- ($.Files.Glob $fileName).AsConfig | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,138 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
One-shot Job that runs keycloak-config-cli against the Keycloak headless
service, rendered when .Values.keycloakConfigCli.enabled. Admin credentials
are taken from the chart's auth secret (keycloak.secretName/secretKey);
optional configuration is mounted under /config and announced to the CLI via
IMPORT_FILES_LOCATIONS. restartPolicy is Never; retries are governed by
backoffLimit, and finished Jobs can be garbage-collected via
ttlSecondsAfterFinished when cleanupAfterFinished is enabled.
*/}}
{{- if .Values.keycloakConfigCli.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ printf "%s-keycloak-config-cli" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak-config-cli
{{- if or .Values.keycloakConfigCli.annotations .Values.commonAnnotations }}
{{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.keycloakConfigCli.annotations .Values.commonAnnotations ) "context" . ) }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
{{- end }}
spec:
backoffLimit: {{ .Values.keycloakConfigCli.backoffLimit }}
{{- if .Values.keycloakConfigCli.cleanupAfterFinished.enabled }}
ttlSecondsAfterFinished: {{ .Values.keycloakConfigCli.cleanupAfterFinished.seconds }}
{{- end }}
template:
metadata:
{{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.keycloakConfigCli.podLabels .Values.commonLabels ) "context" . ) }}
labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }}
app.kubernetes.io/component: keycloak-config-cli
annotations:
{{- /* Checksum forces a new Job pod template when the CLI configuration changes */}}
{{- if (include "keycloak.keycloakConfigCli.createConfigmap" .) }}
checksum/configuration: {{ include (print $.Template.BasePath "/keycloak-config-cli-configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.keycloakConfigCli.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "keycloak.serviceAccountName" . }}
{{- include "keycloak.imagePullSecrets" . | nindent 6 }}
restartPolicy: Never
{{- if .Values.keycloakConfigCli.podSecurityContext.enabled }}
securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.keycloakConfigCli.podSecurityContext "context" $) | nindent 8 }}
{{- end }}
automountServiceAccountToken: {{ .Values.keycloakConfigCli.automountServiceAccountToken }}
{{- if .Values.keycloakConfigCli.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.keycloakConfigCli.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.keycloakConfigCli.podTolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.podTolerations "context" .) | nindent 8 }}
{{- end }}
{{- if .Values.keycloakConfigCli.initContainers }}
initContainers:
{{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.initContainers "context" $) | nindent 8 }}
{{- end }}
containers:
- name: keycloak-config-cli
image: {{ template "keycloak.keycloakConfigCli.image" . }}
imagePullPolicy: {{ .Values.keycloakConfigCli.image.pullPolicy }}
{{- if .Values.keycloakConfigCli.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.command "context" $) | nindent 12 }}
{{- else }}
command:
- java
- -jar
- /opt/bitnami/keycloak-config-cli/keycloak-config-cli.jar
{{- end }}
{{- if .Values.keycloakConfigCli.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.args "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.keycloakConfigCli.containerSecurityContext.enabled }}
securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.keycloakConfigCli.containerSecurityContext "context" $) | nindent 12 }}
{{- end }}
env:
- name: KEYCLOAK_URL
value: {{ printf "http://%s-headless:%d%s" (include "common.names.fullname" .) (.Values.containerPorts.http | int) (.Values.httpRelativePath) }}
- name: KEYCLOAK_USER
value: {{ .Values.auth.adminUser | quote }}
- name: KEYCLOAK_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "keycloak.secretName" . }}
key: {{ include "keycloak.secretKey" . }}
{{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap }}
- name: IMPORT_FILES_LOCATIONS
value: /config/*
{{- end }}
- name: KEYCLOAK_AVAILABILITYCHECK_ENABLED
value: "true"
{{- if .Values.keycloakConfigCli.extraEnvVars }}
{{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.extraEnvVars "context" $) | nindent 12 }}
{{- end }}
{{- if or .Values.keycloakConfigCli.extraEnvVarsCM .Values.keycloakConfigCli.extraEnvVarsSecret }}
envFrom:
{{- if .Values.keycloakConfigCli.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.extraEnvVarsCM "context" $) }}
{{- end }}
{{- if .Values.keycloakConfigCli.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.extraEnvVarsSecret "context" $) }}
{{- end }}
{{- end }}
{{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap .Values.keycloakConfigCli.extraVolumeMounts }}
volumeMounts:
- name: empty-dir
mountPath: /tmp
subPath: tmp-dir
{{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap }}
- name: config-volume
mountPath: /config
{{- end }}
{{- if .Values.keycloakConfigCli.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.keycloakConfigCli.resources }}
resources: {{- toYaml .Values.keycloakConfigCli.resources | nindent 12 }}
{{- else if ne .Values.keycloakConfigCli.resourcesPreset "none" }}
resources: {{- include "common.resources.preset" (dict "type" .Values.keycloakConfigCli.resourcesPreset) | nindent 12 }}
{{- end }}
{{- if .Values.keycloakConfigCli.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.keycloakConfigCli.sidecars "context" $) | nindent 8 }}
{{- end }}
{{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap .Values.keycloakConfigCli.extraVolumes }}
volumes:
- name: empty-dir
emptyDir: {}
{{- if or .Values.keycloakConfigCli.configuration .Values.keycloakConfigCli.existingConfigmap }}
- name: config-volume
configMap:
name: {{ include "keycloak.keycloakConfigCli.configmapName" . }}
{{- end }}
{{- if .Values.keycloakConfigCli.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.keycloakConfigCli.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,41 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
ClusterIP Service exposing the metrics port (plus http, and https when
.Values.tls.enabled) of the Keycloak pods; rendered only when
.Values.metrics.enabled. Selects pods by the standard match labels with
component "keycloak".
*/}}
{{- if .Values.metrics.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ printf "%s-metrics" (include "common.names.fullname" .) }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: metrics
{{- if or .Values.metrics.service.annotations .Values.commonAnnotations }}
{{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . ) }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
ports:
- name: metrics
port: {{ .Values.metrics.service.ports.metrics }}
protocol: TCP
targetPort: {{ .Values.containerPorts.metrics }}
- name: http
port: {{ .Values.metrics.service.ports.http }}
protocol: TCP
targetPort: {{ .Values.containerPorts.http }}
{{- if .Values.tls.enabled }}
- name: https
port: {{ .Values.metrics.service.ports.https }}
protocol: TCP
targetPort: {{ .Values.containerPorts.https }}
{{- end }}
{{- if .Values.metrics.service.extraPorts }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.extraPorts "context" $) | nindent 4 }}
{{- end }}
{{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- end }}

View File

@ -1,102 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
NetworkPolicy for the Keycloak pods (.Values.networkPolicy.enabled).
Egress: unrestricted when allowExternalEgress, otherwise limited to DNS
(53/UDP+TCP), the configured kube API server ports, PostgreSQL, and other
Keycloak pods (inter-node port 7800 plus the http/https container ports).
Ingress: the same Keycloak ports (and the metrics port when it differs from
http); restricted to labelled client pods/namespaces unless allowExternal.
*/}}
{{- if .Values.networkPolicy.enabled }}
kind: NetworkPolicy
apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
metadata:
name: {{ template "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
{{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
app.kubernetes.io/component: keycloak
policyTypes:
- Ingress
- Egress
{{- if .Values.networkPolicy.allowExternalEgress }}
egress:
- {}
{{- else }}
egress:
- ports:
# Allow dns resolution
- port: 53
protocol: UDP
- port: 53
protocol: TCP
{{- range $port := .Values.networkPolicy.kubeAPIServerPorts }}
- port: {{ $port }}
{{- end }}
# Allow connection to PostgreSQL
- ports:
- port: {{ include "keycloak.databasePort" . | trimAll "\"" | int }}
{{- if .Values.postgresql.enabled }}
to:
- podSelector:
matchLabels:
app.kubernetes.io/name: postgresql
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
# Allow connection to other keycloak nodes
- ports:
{{- /* Constant in code: https://github.com/keycloak/keycloak/blob/ce8e925c1ad9bf7a3180d1496e181aeea0ab5f8a/operator/src/main/java/org/keycloak/operator/Constants.java#L60 */}}
- port: 7800
- port: {{ .Values.containerPorts.http }}
{{- if .Values.tls.enabled }}
- port: {{ .Values.containerPorts.https }}
{{- end }}
to:
- podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }}
app.kubernetes.io/component: keycloak
{{- if .Values.networkPolicy.extraEgress }}
{{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
ingress:
- ports:
{{- /* Constant in code: https://github.com/keycloak/keycloak/blob/ce8e925c1ad9bf7a3180d1496e181aeea0ab5f8a/operator/src/main/java/org/keycloak/operator/Constants.java#L60 */}}
- port: 7800
{{- if and (.Values.metrics.enabled) (not (eq (.Values.containerPorts.http | int) (.Values.containerPorts.metrics | int) )) }}
- port: {{ .Values.containerPorts.metrics }} # metrics and health
{{- end }}
- port: {{ .Values.containerPorts.http }}
{{- if .Values.tls.enabled }}
- port: {{ .Values.containerPorts.https }}
{{- end }}
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }}
- podSelector:
matchLabels:
{{ template "common.names.fullname" . }}-client: "true"
{{- if .Values.networkPolicy.ingressNSMatchLabels }}
- namespaceSelector:
matchLabels:
{{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }}
{{ $key | quote }}: {{ $value | quote }}
{{- end }}
{{- if .Values.networkPolicy.ingressNSPodMatchLabels }}
podSelector:
matchLabels:
{{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }}
{{ $key | quote }}: {{ $value | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- /* additionalRules takes precedence over the legacy extraIngress key */}}
{{- $extraIngress := coalesce .Values.networkPolicy.additionalRules .Values.networkPolicy.extraIngress }}
{{- if $extraIngress }}
{{- include "common.tplvalues.render" ( dict "value" $extraIngress "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,28 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
PodDisruptionBudget for the Keycloak pods, rendered when .Values.pdb.create.
Uses pdb.minAvailable when set; otherwise falls back to pdb.maxUnavailable
with a default of 1.
*/}}
{{- if .Values.pdb.create }}
apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ template "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
{{- if .Values.pdb.minAvailable }}
minAvailable: {{ .Values.pdb.minAvailable }}
{{- end }}
{{- if or .Values.pdb.maxUnavailable ( not .Values.pdb.minAvailable ) }}
maxUnavailable: {{ .Values.pdb.maxUnavailable | default 1 }}
{{- end }}
{{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
selector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
app.kubernetes.io/component: keycloak
{{- end }}

View File

@ -1,20 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
PrometheusRule carrying the user-supplied rule groups; rendered only when
metrics, metrics.prometheusRule and at least one group are configured.
The namespace defaults to the release namespace unless
metrics.prometheusRule.namespace overrides it.
*/}}
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.groups}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ template "common.names.fullname" . }}
namespace: {{ default (include "common.names.namespace" .) .Values.metrics.prometheusRule.namespace }}
{{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.prometheusRule.labels .Values.commonLabels ) "context" . ) }}
labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
groups: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.groups "context" .) | nindent 4 }}
{{- end }}

View File

@ -1,28 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
Namespaced Role granting get/list on pods, plus any extra rules supplied in
.Values.rbac.rules; rendered only when both serviceAccount.create and
rbac.create are true.
*/}}
{{- if and .Values.serviceAccount.create .Values.rbac.create }}
kind: Role
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
metadata:
name: {{ template "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
rules:
{{- if .Values.rbac.rules }}
{{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }}
{{- end }}
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
{{- end }}

View File

@ -1,25 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
RoleBinding tying the chart's Role to the Keycloak ServiceAccount; rendered
only when both serviceAccount.create and rbac.create are true.
*/}}
{{- if and .Values.serviceAccount.create .Values.rbac.create }}
kind: RoleBinding
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
metadata:
name: {{ template "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "common.names.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "keycloak.serviceAccountName" . }}
namespace: {{ include "common.names.namespace" . | quote }}
{{- end }}

View File

@ -1,19 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
Secret holding the external database password under key "db-password".
Rendered only when the bundled PostgreSQL is disabled and no existing secret
is provided. The value is generated or preserved across upgrades by
common.secrets.passwords.manage, seeded from externalDatabase.password.
*/}}
{{- if and (not .Values.postgresql.enabled) (not .Values.externalDatabase.existingSecret) (not .Values.postgresql.existingSecret) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-externaldb" .Release.Name }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }}
{{- if or .Values.externalDatabase.annotations .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.merge" (dict "values" (list .Values.externalDatabase.annotations .Values.commonAnnotations) "context" $) | nindent 4 }}
{{- end }}
type: Opaque
data:
db-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-externaldb" .Release.Name) "key" "db-password" "length" 10 "providedValues" (list "externalDatabase.password") "context" $) }}
{{- end }}

View File

@ -1,20 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
Secret with the Keycloak admin password under key "admin-password"; rendered
unless .Values.auth.existingSecret points at a pre-existing secret. The value
is generated or preserved across upgrades by common.secrets.passwords.manage,
seeded from auth.adminPassword.
*/}}
{{- if not .Values.auth.existingSecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if or .Values.auth.annotations .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.merge" (dict "values" (list .Values.auth.annotations .Values.commonAnnotations) "context" $) | nindent 4 }}
{{- end }}
type: Opaque
data:
admin-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-") "key" "admin-password" "length" 10 "providedValues" (list "auth.adminPassword") "context" $) }}
{{- end }}

View File

@ -1,65 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
Main Keycloak Service. Type, clusterIP, externalTrafficPolicy, load balancer
settings and session affinity come from .Values.service. Exposes the http
port when service.http.enabled and the https port when tls.enabled (with
optional nodePorts for NodePort/LoadBalancer types), plus any extraPorts;
selects pods by the standard match labels with component "keycloak".
*/}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if or .Values.service.annotations .Values.commonAnnotations }}
{{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . ) }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
{{- end }}
{{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }}
loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }}
{{- end }}
{{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.sessionAffinity }}
sessionAffinity: {{ .Values.service.sessionAffinity }}
{{- end }}
{{- if .Values.service.sessionAffinityConfig }}
sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }}
{{- end }}
ports:
{{- if .Values.service.http.enabled }}
- name: http
port: {{ coalesce .Values.service.ports.http .Values.service.port }}
protocol: TCP
targetPort: http
{{- if (and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.http))) }}
nodePort: {{ .Values.service.nodePorts.http }}
{{- else if eq .Values.service.type "ClusterIP" }}
nodePort: null
{{- end }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: https
port: {{ coalesce .Values.service.ports.https .Values.service.httpsPort }}
protocol: TCP
targetPort: https
{{- if (and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.https))) }}
nodePort: {{ .Values.service.nodePorts.https }}
{{- else if eq .Values.service.type "ClusterIP" }}
nodePort: null
{{- end }}
{{- end }}
{{- if .Values.service.extraPorts }}
{{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }}
{{- end }}
{{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak

View File

@ -1,22 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
ServiceAccount for the Keycloak pods (.Values.serviceAccount.create), with
optional extra labels/annotations and configurable API token automounting.
*/}}
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "keycloak.serviceAccountName" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if .Values.serviceAccount.extraLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.serviceAccount.extraLabels "context" $) | nindent 4 }}
{{- end }}
{{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }}
{{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
{{- end }}

View File

@ -1,58 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
Prometheus Operator ServiceMonitor, rendered when both metrics.enabled and
metrics.serviceMonitor.enabled. Builds one scrape endpoint per entry of
metrics.serviceMonitor.endpoints, or a single endpoint from
metrics.serviceMonitor.path when that is set; each endpoint is merged with
the shared defaults (port, scheme, tlsConfig, interval, scrapeTimeout,
relabelings, metricRelabelings, honorLabels). Targets the metrics Service
selected by labels in the release namespace.
*/}}
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "common.names.fullname" . }}
namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace }}
{{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }}
labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
app.kubernetes.io/component: keycloak
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
{{- if .Values.metrics.serviceMonitor.jobLabel }}
jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }}
{{- end }}
endpoints:
{{- $defaultEndpoint := pick .Values.metrics.serviceMonitor "port" "scheme" "tlsConfig" "interval" "scrapeTimeout" "relabelings" "metricRelabelings" "honorLabels" }}
{{- $endpoints := ternary (.Values.metrics.serviceMonitor.endpoints) (list (dict "path" .Values.metrics.serviceMonitor.path)) (empty .Values.metrics.serviceMonitor.path) }}
{{- range $endpoints }}
{{- $endpoint := merge . $defaultEndpoint }}
- port: {{ $endpoint.port | quote }}
scheme: {{ $endpoint.scheme | quote }}
{{- if $endpoint.tlsConfig }}
tlsConfig: {{- include "common.tplvalues.render" ( dict "value" $endpoint.tlsConfig "context" $) | nindent 8 }}
{{- end }}
path: {{ include "common.tplvalues.render" ( dict "value" $endpoint.path "context" $) }}
{{- if $endpoint.interval }}
interval: {{ $endpoint.interval }}
{{- end }}
{{- if $endpoint.scrapeTimeout }}
scrapeTimeout: {{ $endpoint.scrapeTimeout }}
{{- end }}
{{- if $endpoint.relabelings }}
relabelings: {{- include "common.tplvalues.render" ( dict "value" $endpoint.relabelings "context" $) | nindent 6 }}
{{- end }}
{{- if $endpoint.metricRelabelings }}
metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" $endpoint.metricRelabelings "context" $) | nindent 6 }}
{{- end }}
{{- if $endpoint.honorLabels }}
honorLabels: {{ $endpoint.honorLabels }}
{{- end }}
{{- end }}
namespaceSelector:
matchNames:
- {{ include "common.names.namespace" . | quote }}
selector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
{{- if .Values.metrics.serviceMonitor.selector }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
{{- end }}
app.kubernetes.io/component: metrics
{{- end }}

View File

@ -1,371 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
StatefulSet running the Keycloak server pods. Replica count, update strategy,
scheduling, probes, TLS/database secret wiring and extra containers/volumes
are all driven by chart values; helper templates ("common.*") come from the
Bitnami common chart.
*/}}
apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
  name: {{ template "common.names.fullname" . }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak
  {{- if or .Values.statefulsetAnnotations .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.merge" ( dict "values" ( list .Values.statefulsetAnnotations .Values.commonAnnotations ) "context" $ ) | nindent 4 }}
  {{- end }}
spec:
  {{- /* Replicas are omitted when autoscaling is enabled so the HPA owns the count. */}}
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  revisionHistoryLimit: {{ .Values.revisionHistoryLimitCount }}
  podManagementPolicy: {{ .Values.podManagementPolicy }}
  serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
  updateStrategy:
    {{- include "common.tplvalues.render" (dict "value" .Values.updateStrategy "context" $ ) | nindent 4 }}
  {{- if .Values.minReadySeconds }}
  minReadySeconds: {{ .Values.minReadySeconds }}
  {{- end }}
  {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
  selector:
    matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
      app.kubernetes.io/component: keycloak
  template:
    metadata:
      annotations:
        {{- /* Checksum annotations force a rolling restart when the referenced
               configmaps/secrets change. */}}
        checksum/configmap-env-vars: {{ include (print $.Template.BasePath "/configmap-env-vars.yaml") . | sha256sum }}
        {{- if not .Values.auth.existingSecret }}
        checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
        {{- end }}
        {{- if (include "keycloak.createConfigmap" .) }}
        checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        {{- end }}
        {{- if .Values.podAnnotations }}
        {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
        {{- end }}
      labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }}
        app.kubernetes.io/component: keycloak
    spec:
      serviceAccountName: {{ template "keycloak.serviceAccountName" . }}
      {{- include "keycloak.imagePullSecrets" . | nindent 6 }}
      automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
      {{- if .Values.hostAliases }}
      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }}
      {{- end }}
      {{- /* Explicit affinity wins; otherwise the preset-based affinities are used. */}}
      {{- if .Values.affinity }}
      affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.affinity "context" $) | nindent 8 }}
      {{- else }}
      affinity:
        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }}
        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
      {{- end }}
      {{- if .Values.tolerations }}
      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }}
      {{- end }}
      {{- if .Values.topologySpreadConstraints }}
      topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName | quote }}
      {{- end }}
      {{- if .Values.schedulerName }}
      schedulerName: {{ .Values.schedulerName }}
      {{- end }}
      {{- if .Values.podSecurityContext.enabled }}
      securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }}
      {{- end }}
      {{- if .Values.dnsPolicy }}
      dnsPolicy: {{ .Values.dnsPolicy }}
      {{- end }}
      {{- if .Values.dnsConfig }}
      dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.dnsConfig "context" .) | nindent 8 }}
      {{- end }}
      {{- if semverCompare ">= 1.13" (include "common.capabilities.kubeVersion" .) }}
      enableServiceLinks: {{ .Values.enableServiceLinks }}
      {{- end }}
      {{- if .Values.terminationGracePeriodSeconds }}
      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
      {{- end }}
      {{- /* The default init container copies the dirs Keycloak must write to
             (lib/quarkus, data, providers) into the shared "empty-dir" volume
             so the main container mounts writable copies of them. */}}
      {{- if or .Values.enableDefaultInitContainers .Values.initContainers }}
      initContainers:
        {{- if .Values.enableDefaultInitContainers }}
        - name: prepare-write-dirs
          image: {{ template "keycloak.image" . }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - /bin/bash
          args:
            - -ec
            - |
              . /opt/bitnami/scripts/liblog.sh
              info "Copying writable dirs to empty dir"
              # In order to not break the application functionality we need to make some
              # directories writable, so we need to copy it to an empty dir volume
              cp -r --preserve=mode /opt/bitnami/keycloak/lib/quarkus /emptydir/app-quarkus-dir
              cp -r --preserve=mode /opt/bitnami/keycloak/data /emptydir/app-data-dir
              cp -r --preserve=mode /opt/bitnami/keycloak/providers /emptydir/app-providers-dir
              info "Copy operation completed"
          {{- if .Values.containerSecurityContext.enabled }}
          securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}
          {{- end }}
          {{- if .Values.resources }}
          resources: {{- toYaml .Values.resources | nindent 12 }}
          {{- else if ne .Values.resourcesPreset "none" }}
          resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: empty-dir
              mountPath: /emptydir
        {{- end }}
        {{- if .Values.initContainers }}
        {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
        {{- end }}
      {{- end }}
      {{- /* Main Keycloak container: admin/database credentials and optional
             TLS/SPI store passwords are injected from Secrets; the bulk of the
             configuration comes from the "-env-vars" ConfigMap via envFrom. */}}
      containers:
        - name: keycloak
          image: {{ template "keycloak.image" . }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          {{- if .Values.lifecycleHooks }}
          lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }}
          {{- end }}
          {{- if .Values.containerSecurityContext.enabled }}
          securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}
          {{- end }}
          {{- if .Values.diagnosticMode.enabled }}
          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
          {{- else if .Values.command }}
          command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }}
          {{- end }}
          {{- if .Values.diagnosticMode.enabled }}
          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
          {{- else if .Values.args }}
          args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }}
          {{- end }}
          env:
            - name: KUBERNETES_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: BITNAMI_DEBUG
              value: {{ ternary "true" "false" .Values.image.debug | quote }}
            - name: KEYCLOAK_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "keycloak.secretName" . }}
                  key: {{ include "keycloak.secretKey" . }}
            - name: KEYCLOAK_DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "keycloak.databaseSecretName" . }}
                  key: {{ include "keycloak.databaseSecretPasswordKey" . }}
            {{- if .Values.externalDatabase.existingSecretHostKey }}
            - name: KEYCLOAK_DATABASE_HOST
              valueFrom:
                secretKeyRef:
                  name: {{ include "keycloak.databaseSecretName" . }}
                  key: {{ include "keycloak.databaseSecretHostKey" . }}
            {{- end }}
            {{- if .Values.externalDatabase.existingSecretPortKey }}
            - name: KEYCLOAK_DATABASE_PORT
              valueFrom:
                secretKeyRef:
                  name: {{ include "keycloak.databaseSecretName" . }}
                  key: {{ include "keycloak.databaseSecretPortKey" . }}
            {{- end }}
            {{- if .Values.externalDatabase.existingSecretUserKey }}
            - name: KEYCLOAK_DATABASE_USER
              valueFrom:
                secretKeyRef:
                  name: {{ include "keycloak.databaseSecretName" . }}
                  key: {{ include "keycloak.databaseSecretUserKey" . }}
            {{- end }}
            {{- if .Values.externalDatabase.existingSecretDatabaseKey }}
            - name: KEYCLOAK_DATABASE_NAME
              valueFrom:
                secretKeyRef:
                  name: {{ include "keycloak.databaseSecretName" . }}
                  key: {{ include "keycloak.databaseSecretDatabaseKey" . }}
            {{- end }}
            {{- if and .Values.tls.enabled (or .Values.tls.keystorePassword .Values.tls.passwordsSecret) }}
            - name: KEYCLOAK_HTTPS_KEY_STORE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "keycloak.tlsPasswordsSecretName" . }}
                  key: "tls-keystore-password"
            {{- end }}
            {{- if and .Values.tls.enabled (or .Values.tls.truststorePassword .Values.tls.passwordsSecret) }}
            - name: KEYCLOAK_HTTPS_TRUST_STORE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "keycloak.tlsPasswordsSecretName" . }}
                  key: "tls-truststore-password"
            {{- end }}
            {{- if and .Values.spi.existingSecret (or .Values.spi.truststorePassword .Values.spi.passwordsSecret) }}
            - name: KEYCLOAK_SPI_TRUSTSTORE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "keycloak.spiPasswordsSecretName" . }}
                  key: "spi-truststore-password"
            {{- end }}
            - name: KEYCLOAK_HTTP_RELATIVE_PATH
              value: {{ .Values.httpRelativePath | quote }}
            {{- if .Values.extraStartupArgs }}
            - name: KEYCLOAK_EXTRA_ARGS
              value: {{ .Values.extraStartupArgs | quote }}
            {{- end }}
            {{- if .Values.adminRealm }}
            - name: KC_SPI_ADMIN_REALM
              value: "{{ .Values.adminRealm }}"
            {{- end }}
            {{- if .Values.extraEnvVars }}
            {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
            {{- end }}
          envFrom:
            - configMapRef:
                name: {{ printf "%s-env-vars" (include "common.names.fullname" .) }}
            {{- if .Values.extraEnvVarsCM }}
            - configMapRef:
                name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }}
            {{- end }}
            {{- if .Values.extraEnvVarsSecret }}
            - secretRef:
                name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }}
            {{- end }}
          {{- if .Values.resources }}
          resources: {{- toYaml .Values.resources | nindent 12 }}
          {{- else if ne .Values.resourcesPreset "none" }}
          resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }}
          {{- end }}
          ports:
            - name: http
              containerPort: {{ .Values.containerPorts.http }}
              protocol: TCP
            {{- if .Values.tls.enabled }}
            - name: https
              containerPort: {{ .Values.containerPorts.https }}
              protocol: TCP
            {{- end }}
            {{- /* Metrics gets its own port only when it differs from the HTTP port. */}}
            {{- if and (.Values.metrics.enabled) (not (eq (.Values.containerPorts.http | int) (.Values.containerPorts.metrics | int) )) }}
            - name: metrics
              containerPort: {{ .Values.containerPorts.metrics }}
              protocol: TCP
            {{- end}}
            {{- /* Constant in code: https://github.com/keycloak/keycloak/blob/ce8e925c1ad9bf7a3180d1496e181aeea0ab5f8a/operator/src/main/java/org/keycloak/operator/Constants.java#L60 */}}
            - name: discovery
              containerPort: 7800
            {{- if .Values.extraContainerPorts }}
            {{- include "common.tplvalues.render" (dict "value" .Values.extraContainerPorts "context" $) | nindent 12 }}
            {{- end }}
          {{- /* All probes are skipped entirely in diagnostic mode; custom probe
                 definitions take precedence over the enabled/standard ones. */}}
          {{- if not .Values.diagnosticMode.enabled }}
          {{- if .Values.customStartupProbe }}
          startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }}
          {{- else if .Values.startupProbe.enabled }}
          startupProbe: {{- omit .Values.startupProbe "enabled" | toYaml | nindent 12 }}
            httpGet:
              path: {{ .Values.httpRelativePath }}
              port: http
          {{- end }}
          {{- if .Values.customLivenessProbe }}
          livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }}
          {{- else if .Values.livenessProbe.enabled }}
          livenessProbe: {{- omit .Values.livenessProbe "enabled" | toYaml | nindent 12 }}
            tcpSocket:
              port: http
          {{- end }}
          {{- if .Values.customReadinessProbe }}
          readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }}
          {{- else if .Values.readinessProbe.enabled }}
          readinessProbe: {{- omit .Values.readinessProbe "enabled" | toYaml | nindent 12 }}
            httpGet:
              path: {{ .Values.httpRelativePath }}realms/{{ .Values.adminRealm | default "master" }}
              port: http
          {{- end }}
          {{- end }}
          {{- /* The writable dirs prepared by the init container are mounted
                 back over their original /opt/bitnami/keycloak paths. */}}
          volumeMounts:
            - name: empty-dir
              mountPath: /tmp
              subPath: tmp-dir
            - name: empty-dir
              mountPath: /bitnami/keycloak
              subPath: app-volume-dir
            - name: empty-dir
              mountPath: /opt/bitnami/keycloak/conf
              subPath: app-conf-dir
            - name: empty-dir
              mountPath: /opt/bitnami/keycloak/lib/quarkus
              subPath: app-quarkus-dir
            - name: empty-dir
              mountPath: /opt/bitnami/keycloak/data
              subPath: app-data-dir
            - name: empty-dir
              mountPath: /opt/bitnami/keycloak/providers
              subPath: app-providers-dir
            {{- if or .Values.configuration .Values.existingConfigmap }}
            - name: keycloak-config
              mountPath: /bitnami/keycloak/conf/keycloak.conf
              subPath: keycloak.conf
            {{- end }}
            {{- if .Values.tls.enabled }}
            - name: certificates
              mountPath: /opt/bitnami/keycloak/certs
              readOnly: true
            {{- end }}
            {{- if .Values.customCaExistingSecret }}
            - name: custom-ca
              mountPath: /opt/bitnami/keycloak/custom-ca
              readOnly: true
            {{- end }}
            {{- if .Values.spi.existingSecret }}
            - name: spi-certificates
              mountPath: /opt/bitnami/keycloak/spi-certs
              readOnly: true
            {{- end }}
            {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }}
            - name: custom-init-scripts
              mountPath: /docker-entrypoint-initdb.d
            {{- end }}
          {{- if .Values.extraVolumeMounts }}
          {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }}
          {{- end }}
        {{- if .Values.sidecars }}
        {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }}
        {{- end }}
      {{- /* Volume sources backing the mounts above; cert secrets use mode 0644 (420). */}}
      volumes:
        - name: empty-dir
          emptyDir: {}
        {{- if or .Values.configuration .Values.existingConfigmap }}
        - name: keycloak-config
          configMap:
            name: {{ include "keycloak.configmapName" . }}
        {{- end }}
        {{- if .Values.tls.enabled }}
        - name: certificates
          secret:
            secretName: {{ include "keycloak.tlsSecretName" . }}
            defaultMode: 420
        {{- end }}
        {{- if .Values.customCaExistingSecret }}
        - name: custom-ca
          secret:
            secretName: {{ .Values.customCaExistingSecret }}
            defaultMode: 420
        {{- end }}
        {{- if .Values.spi.existingSecret }}
        - name: spi-certificates
          secret:
            secretName: {{ .Values.spi.existingSecret }}
            defaultMode: 420
        {{- end }}
        {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }}
        - name: custom-init-scripts
          configMap:
            name: {{ include "keycloak.initdbScriptsCM" . }}
        {{- end }}
      {{- if .Values.extraVolumes }}
      {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
      {{- end }}

View File

@ -1,43 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
Renders up to two Opaque Secrets holding keystore/truststore passwords, only
when the passwords are supplied inline in values rather than via an existing
secret:
  - "<fullname>-tls-passwords": tls-keystore-password / tls-truststore-password
    (skipped when .Values.tls.passwordsSecret is set)
  - "<fullname>-spi-passwords": spi-truststore-password
    (skipped when .Values.spi.passwordsSecret is set)
*/}}
{{- if and (or .Values.tls.keystorePassword .Values.tls.truststorePassword) (not .Values.tls.passwordsSecret) }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ printf "%s-tls-passwords" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: Opaque
data:
  {{- if .Values.tls.keystorePassword }}
  tls-keystore-password: {{ .Values.tls.keystorePassword | b64enc | quote }}
  {{- end }}
  {{- if .Values.tls.truststorePassword }}
  tls-truststore-password: {{ .Values.tls.truststorePassword | b64enc | quote }}
  {{- end }}
---
{{- end }}
{{- if and .Values.spi.truststorePassword (not .Values.spi.passwordsSecret) }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ printf "%s-spi-passwords" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: Opaque
data:
  {{- if .Values.spi.truststorePassword }}
  spi-truststore-password: {{ .Values.spi.truststorePassword | b64enc | quote }}
  {{- end }}
{{- end }}

View File

@ -1,71 +0,0 @@
{{- /*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- /*
TLS-related Secrets for Keycloak:
  1. One kubernetes.io/tls Secret per entry in .Values.ingress.secrets,
     built from the user-supplied certificate/key pair.
  2. A self-signed "<hostname>-tls" Secret when both .Values.ingress.tls and
     .Values.ingress.selfSigned are set, generated with a chart-local CA.
  3. An internal "<fullname>-crt" Secret when "keycloak.createTlsSecret" is
     truthy, whose SANs cover the service name, "<svc>.<ns>" and the full
     "<svc>.<ns>.svc.<clusterDomain>" DNS name.
"common.secrets.lookup" returns the value already stored in the cluster when
the secret exists and falls back to the freshly generated certificate, so
certificates are not regenerated on every helm upgrade.
*/}}
{{- if .Values.ingress.enabled }}
{{- if .Values.ingress.secrets }}
{{- range .Values.ingress.secrets }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }}
  namespace: {{ include "common.names.namespace" $ | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }}
  {{- if $.Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: kubernetes.io/tls
data:
  tls.crt: {{ include "common.tplvalues.render" ( dict "value" .certificate "context" $ ) | b64enc }}
  tls.key: {{ include "common.tplvalues.render" ( dict "value" .key "context" $ ) | b64enc }}
---
{{- end }}
{{- end }}
{{- if and .Values.ingress.tls .Values.ingress.selfSigned }}
{{- $secretName := printf "%s-tls" .Values.ingress.hostname }}
{{- $ca := genCA "keycloak-ca" 365 }}
{{- $cert := genSignedCert (tpl .Values.ingress.hostname .) nil (list (tpl .Values.ingress.hostname .)) 365 $ca }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ $secretName }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: kubernetes.io/tls
data:
  tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
  tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
  ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
{{- end }}
{{- end }}
{{- if (include "keycloak.createTlsSecret" $) }}
{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) }}
{{- $ca := genCA "keycloak-ca" 365 }}
{{- $releaseNamespace := include "common.names.namespace" . }}
{{- $clusterDomain := .Values.clusterDomain }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ $secretName }}
  namespace: {{ include "common.names.namespace" . | quote }}
  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
    app.kubernetes.io/component: keycloak
  {{- if .Values.commonAnnotations }}
  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
  {{- end }}
type: Opaque
data:
  {{- $replicaCount := int .Values.replicaCount }}
  {{- $svcName := include "common.names.fullname" . }}
  {{- $altNames := list (printf "%s.%s.svc.%s" $svcName $releaseNamespace $clusterDomain) (printf "%s.%s" $svcName $releaseNamespace) $svcName }}
  {{- $cert := genSignedCert $svcName nil $altNames 365 $ca }}
  tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
  tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
  ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
{{- end }}

File diff suppressed because it is too large Load Diff

View File

@ -1,50 +0,0 @@
# Keycloak chart value overrides for the "futureporn" namespace.
# The bundled PostgreSQL subchart is disabled in favour of the external
# "postgresql-primary" service; credentials come from the existing
# "postgresql" secret.
fullnameOverride: keycloak
namespaceOverride: futureporn
postgresql:
  enabled: false
externalDatabase:
  host: postgresql-primary.futureporn.svc.cluster.local
  user: postgres
  existingSecret: postgresql
  port: 5432
  database: keycloak
logging:
  level: INFO # INFO is default
# LoadBalancer service; external-dns picks up the hostname annotation.
service:
  type: LoadBalancer
  http:
    enabled: true
  ports:
    http: 8080
  annotations:
    external-dns.alpha.kubernetes.io/hostname: keycloak.fp.sbtp.xyz
global:
  defaultStorageClass: standard
proxy: edge
# curl -o /emptydir/app-providers-dir/patreon-provider.jar -Ls https://github.com/insanity54/keycloak-patreon-provider/releases/download/$tag/keycloak-patreon-provider-$tag.jar
#curl -H "X-Pinggy-No-Screen: 1" -o /emptydir/app-providers-dir/patreon-provider.jar -Ls http://a.free.pinggy.link/keycloak-patreon-provider-$tag.jar
# Extra init container: downloads the keycloak-patreon-provider jar into the
# shared /emptydir providers dir so Keycloak loads it as a provider at boot.
initContainers:
  - name: keycloak-patreon-provider-installer
    image: alpine/curl:latest
    imagePullPolicy: IfNotPresent
    command:
      - sh
      - -c
      - |
        set -e
        tag=1.3.0-SNAPSHOT
        echo "Downloading $tag"
        curl --max-time 60 -o /emptydir/app-providers-dir/patreon-provider.jar -Ls https://github.com/insanity54/keycloak-patreon-provider/releases/download/$tag/keycloak-patreon-provider-$tag.jar
        chown 1001:1001 /emptydir/app-providers-dir/patreon-provider.jar
        # NOTE(review): with `set -e` a failed curl aborts the script, so the
        # "$?" below reports chown's status, not curl's download result.
        echo "Download completed with exit code $?"
    volumeMounts:
      - name: empty-dir
        mountPath: /emptydir

108
config/deploy.yml Normal file
View File

@ -0,0 +1,108 @@
# Kamal deployment configuration for the "bright" Phoenix service.
# Image is built from ./dockerfiles/bright.dockerfile and pushed to the
# self-hosted Gitea registry; the registry password and any env secrets are
# read from .kamal/secrets.
# Name of your application. Used to uniquely configure containers.
service: futureporn
# Name of the container image.
image: futureporn/bright
# Deploy to these servers.
servers:
  web:
    - 194.163.140.228
  # job:
  #   hosts:
  #     - 192.168.0.1
  #   cmd: bin/jobs
# Enable SSL auto certification via Let's Encrypt and allow for multiple apps on a single web server.
# Remove this section when using multiple web servers and ensure you terminate SSL at your load balancer.
#
# Note: If using Cloudflare, set encryption mode in SSL/TLS setting to "Full" to enable CF-to-app encryption.
proxy:
  ssl: true
  # NOTE(review): "app.example.com" is the kamal-init sample default — this
  # looks like a leftover placeholder; confirm the real hostname before
  # deploying (SSL issuance will fail for a domain you don't control).
  host: app.example.com
  # Proxy connects to your container on port 80 by default.
  # app_port: 3000
# Credentials for your image host.
registry:
  # Specify the registry server, if you're not using Docker Hub
  # server: registry.digitalocean.com / ghcr.io / ...
  server: gitea.futureporn.net
  username: cj_clippy
  # Always use an access token rather than real password (pulled from .kamal/secrets).
  password:
    - KAMAL_REGISTRY_PASSWORD
# Configure builder setup.
builder:
  arch: amd64
  # NOTE(review): bright.dockerfile's COPY paths assume the repo ROOT as
  # context ("./services/bright/..."); confirm "../" resolves there relative
  # to this file's location.
  context: ../
  dockerfile: ./dockerfiles/bright.dockerfile
  # Pass in additional build args needed for your Dockerfile.
  # args:
  #   RUBY_VERSION: <%= File.read('.ruby-version').strip %>
# Inject ENV variables into containers (secrets come from .kamal/secrets).
#
# env:
#   clear:
#     DB_HOST: 192.168.0.2
#   secret:
#     - RAILS_MASTER_KEY
# Aliases are triggered with "bin/kamal <alias>". You can overwrite arguments on invocation:
# "bin/kamal logs -r job" will tail logs from the first server in the job section.
#
# aliases:
#   shell: app exec --interactive --reuse "bash"
# Use a different ssh user than root
#
# ssh:
#   user: app
# Use a persistent storage volume.
#
# volumes:
#   - "app_storage:/app/storage"
# Bridge fingerprinted assets, like JS and CSS, between versions to avoid
# hitting 404 on in-flight requests. Combines all files from new and old
# version inside the asset_path.
#
# asset_path: /app/public/assets
# Configure rolling deploys by setting a wait time between batches of restarts.
#
# boot:
#   limit: 10 # Can also specify as a percentage of total hosts, such as "25%"
#   wait: 2
# Use accessory services (secrets come from .kamal/secrets).
#
# accessories:
#   db:
#     image: mysql:8.0
#     host: 192.168.0.2
#     port: 3306
#     env:
#       clear:
#         MYSQL_ROOT_HOST: '%'
#       secret:
#         - MYSQL_ROOT_PASSWORD
#     files:
#       - config/mysql/production.cnf:/etc/mysql/my.cnf
#       - db/production.sql:/docker-entrypoint-initdb.d/setup.sql
#     directories:
#       - data:/var/lib/mysql
#   redis:
#     image: valkey/valkey:8
#     host: 192.168.0.2
#     port: 6379
#     directories:
#       - data:/data

1
contrib/superstreamer Submodule

@ -0,0 +1 @@
Subproject commit 9e868acede851f396b3db98fb9799ab4bf712b02

View File

@ -13,7 +13,9 @@
"python310@latest",
"python310Packages.pip@latest",
"vips@latest",
"kubefwd@latest"
"hcloud@latest",
"ruby@latest",
"doppler@latest"
],
"env": {
"DEVBOX_COREPACK_ENABLED": "true",

View File

@ -97,6 +97,54 @@
}
}
},
"doppler@latest": {
"last_modified": "2024-12-23T21:10:33Z",
"resolved": "github:NixOS/nixpkgs/de1864217bfa9b5845f465e771e0ecb48b30e02d#doppler",
"source": "devbox-search",
"version": "3.71.0",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/6820wwrx8r525zq48bpv295wssiad4s1-doppler-3.71.0",
"default": true
}
],
"store_path": "/nix/store/6820wwrx8r525zq48bpv295wssiad4s1-doppler-3.71.0"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/lz24rlrrazcwfhz2kn5dszjwdcasvivw-doppler-3.71.0",
"default": true
}
],
"store_path": "/nix/store/lz24rlrrazcwfhz2kn5dszjwdcasvivw-doppler-3.71.0"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/k908q0cadca9p70jrfr2lcrfclpr343n-doppler-3.71.0",
"default": true
}
],
"store_path": "/nix/store/k908q0cadca9p70jrfr2lcrfclpr343n-doppler-3.71.0"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/0vsjlajvmzg816sfphg5mswv4ply20ys-doppler-3.71.0",
"default": true
}
],
"store_path": "/nix/store/0vsjlajvmzg816sfphg5mswv4ply20ys-doppler-3.71.0"
}
}
},
"ffmpeg@latest": {
"last_modified": "2024-07-24T00:53:51Z",
"resolved": "github:NixOS/nixpkgs/4f02464258baaf54992debfd010a7a3662a25536#ffmpeg",
@ -245,6 +293,54 @@
}
}
},
"hcloud@latest": {
"last_modified": "2024-12-23T21:10:33Z",
"resolved": "github:NixOS/nixpkgs/de1864217bfa9b5845f465e771e0ecb48b30e02d#hcloud",
"source": "devbox-search",
"version": "1.49.0",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/wf8q3kml6c2clgp9l3djnlcbmfph4vnn-hcloud-1.49.0",
"default": true
}
],
"store_path": "/nix/store/wf8q3kml6c2clgp9l3djnlcbmfph4vnn-hcloud-1.49.0"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/68crkxrn8nfrzsb5n2ri8w6v4bbg4w3m-hcloud-1.49.0",
"default": true
}
],
"store_path": "/nix/store/68crkxrn8nfrzsb5n2ri8w6v4bbg4w3m-hcloud-1.49.0"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/pjbjhza0bfvz29arl2h6c94bgs7ldf7c-hcloud-1.49.0",
"default": true
}
],
"store_path": "/nix/store/pjbjhza0bfvz29arl2h6c94bgs7ldf7c-hcloud-1.49.0"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/5q2ws8lrgkrfpm9b8r2s0h5i7d7c4bnx-hcloud-1.49.0",
"default": true
}
],
"store_path": "/nix/store/5q2ws8lrgkrfpm9b8r2s0h5i7d7c4bnx-hcloud-1.49.0"
}
}
},
"k9s@latest": {
"last_modified": "2024-07-20T09:11:00Z",
"resolved": "github:NixOS/nixpkgs/6e14bbce7bea6c4efd7adfa88a40dac750d80100#k9s",
@ -377,54 +473,6 @@
}
}
},
"kubefwd@latest": {
"last_modified": "2024-10-13T23:44:06Z",
"resolved": "github:NixOS/nixpkgs/d4f247e89f6e10120f911e2e2d2254a050d0f732#kubefwd",
"source": "devbox-search",
"version": "1.22.5",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/sq746gibrmkw13dlnbn7ybfl5hpdj3gx-kubefwd-1.22.5",
"default": true
}
],
"store_path": "/nix/store/sq746gibrmkw13dlnbn7ybfl5hpdj3gx-kubefwd-1.22.5"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/yydhb67m0n5pwhfkhmqw50x7bihhpss6-kubefwd-1.22.5",
"default": true
}
],
"store_path": "/nix/store/yydhb67m0n5pwhfkhmqw50x7bihhpss6-kubefwd-1.22.5"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/50xiwkb0lqn680m38w3jagrh4z696y92-kubefwd-1.22.5",
"default": true
}
],
"store_path": "/nix/store/50xiwkb0lqn680m38w3jagrh4z696y92-kubefwd-1.22.5"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/hh52q0fm8437y32v3ssih45n770fysaf-kubefwd-1.22.5",
"default": true
}
],
"store_path": "/nix/store/hh52q0fm8437y32v3ssih45n770fysaf-kubefwd-1.22.5"
}
}
},
"kubernetes-helm@latest": {
"last_modified": "2024-07-20T09:11:00Z",
"resolved": "github:NixOS/nixpkgs/6e14bbce7bea6c4efd7adfa88a40dac750d80100#kubernetes-helm",
@ -680,6 +728,71 @@
}
}
},
"ruby@latest": {
"last_modified": "2024-12-27T03:08:00Z",
"plugin_version": "0.0.2",
"resolved": "github:NixOS/nixpkgs/7cc0bff31a3a705d3ac4fdceb030a17239412210#ruby_3_4",
"source": "devbox-search",
"version": "3.4.1",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/qaxayv9z27mqdg3k0f8wn74yhv5vdw7d-ruby-3.4.1",
"default": true
},
{
"name": "devdoc",
"path": "/nix/store/9g35v966nxpgkjax0vifgyd7aq85qp0j-ruby-3.4.1-devdoc"
}
],
"store_path": "/nix/store/qaxayv9z27mqdg3k0f8wn74yhv5vdw7d-ruby-3.4.1"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/8ylfvxsav4kkk858ving6hf281a1vqs5-ruby-3.4.1",
"default": true
},
{
"name": "devdoc",
"path": "/nix/store/lzy6z85s93llj1kvmcdky8r1n7m9shxl-ruby-3.4.1-devdoc"
}
],
"store_path": "/nix/store/8ylfvxsav4kkk858ving6hf281a1vqs5-ruby-3.4.1"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/y9v3710alwdz99blk4whgp0jif8j5zsg-ruby-3.4.1",
"default": true
},
{
"name": "devdoc",
"path": "/nix/store/a9a0dxkd3ih6jyi0vj77n16hj9rj4y7s-ruby-3.4.1-devdoc"
}
],
"store_path": "/nix/store/y9v3710alwdz99blk4whgp0jif8j5zsg-ruby-3.4.1"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/lmfq7mn710jknr40ik46yf759jras0s3-ruby-3.4.1",
"default": true
},
{
"name": "devdoc",
"path": "/nix/store/4jx1cvll34y0kg2hp1p5w0im45ba7x53-ruby-3.4.1-devdoc"
}
],
"store_path": "/nix/store/lmfq7mn710jknr40ik46yf759jras0s3-ruby-3.4.1"
}
}
},
"tilt@latest": {
"last_modified": "2024-07-15T21:47:20Z",
"resolved": "github:NixOS/nixpkgs/b2c1f10bfbb3f617ea8e8669ac13f3f56ceb2ea2#tilt",

View File

@ -0,0 +1,28 @@
## Important! Build context is the ROOT of the project.
## this keeps the door open for future possibility of shared code between pnpm workspace packages
FROM oven/bun:1 AS base
# curl is required by the install-bin script below.
RUN apt-get update && apt-get install -y \
  curl
RUN mkdir -p /tmp/dev
WORKDIR /tmp/dev
COPY ./contrib/superstreamer .
RUN ls -la
# Install ffmpeg, ffprobe
RUN bun run install-bin
# Continue FROM the "base" stage (NOT a fresh oven/bun:1 image) so the copied
# superstreamer sources and the /tmp/dev WORKDIR carry over. Starting this
# stage from oven/bun:1 left it with an empty filesystem, so bun install /
# test / build ran against nothing and "ls -la ./dist" had no dist to list.
FROM base AS install
RUN bun install
RUN bun run test
RUN bun run build
# Drop to the unprivileged bun user for runtime.
USER bun
EXPOSE 7991/tcp
WORKDIR /tmp/dev/packages/artisan
RUN ls -la ./dist
ENTRYPOINT [ "bun", "run", "./dist/index.js" ]

View File

@ -0,0 +1,111 @@
## 2024-12-26 -- file created using `mix phx.gen.release --docker`
## Dev-mode image for the "bright" Phoenix app: installs deps, copies config,
## priv, lib, assets and test, then runs `mix phx.server`. The generated
## production release stages are kept below, commented out.
# Find eligible builder and runner images on Docker Hub. We use Ubuntu/Debian
# instead of Alpine to avoid DNS resolution issues in production.
#
# https://hub.docker.com/r/hexpm/elixir/tags?page=1&name=ubuntu
# https://hub.docker.com/_/ubuntu?tab=tags
#
# This file is based on these images:
#
#   - https://hub.docker.com/r/hexpm/elixir/tags - for the build image
#   - https://hub.docker.com/_/debian?tab=tags&page=1&name=bullseye-20241202-slim - for the release image
#   - https://pkgs.org/ - resource for finding needed packages
#   - Ex: hexpm/elixir:1.17.3-erlang-27.1.2-debian-bullseye-20241202-slim
#
ARG ELIXIR_VERSION=1.17.3
ARG OTP_VERSION=27.1.2
ARG DEBIAN_VERSION=bullseye-20241202-slim
ARG BUILDER_IMAGE="hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG RUNNER_IMAGE="debian:${DEBIAN_VERSION}"
FROM ${BUILDER_IMAGE} AS dev
# install build dependencies
# (inotify-tools is needed by Phoenix live reload in dev)
RUN apt-get update -y && apt-get install -y build-essential git inotify-tools \
  && apt-get clean && rm -f /var/lib/apt/lists/*_*
# prepare build dir
WORKDIR /app
# install hex + rebar
RUN mix local.hex --force && \
  mix local.rebar --force
# set build ENV
ENV MIX_ENV="dev"
# install mix dependencies
# NOTE: build context must be the repo root (paths are ./services/bright/...).
COPY ./services/bright/mix.exs ./services/bright/mix.lock ./
RUN mix deps.get --only $MIX_ENV
RUN mkdir config
# copy compile-time config files before we compile dependencies
# to ensure any relevant config change will trigger the dependencies
# to be re-compiled.
# NOTE(review): there is no `mix deps.compile` step after this COPY, so the
# "trigger re-compile" note above doesn't apply as written — deps compile
# lazily when `mix phx.server` first runs. Confirm whether a deps.compile
# layer should be restored here for build caching.
COPY ./services/bright/config/config.exs ./services/bright/config/${MIX_ENV}.exs config/
COPY ./services/bright/priv priv
COPY ./services/bright/lib lib
COPY ./services/bright/assets assets
COPY ./services/bright/test test
CMD ["mix", "phx.server"]
# # start a new build stage so that the final image will only contain
# # the compiled release and other runtime necessities
# FROM ${RUNNER_IMAGE}
# RUN apt-get update -y && \
#   apt-get install -y libstdc++6 openssl libncurses5 locales ca-certificates \
#   && apt-get clean && rm -f /var/lib/apt/lists/*_*
# # Set the locale
# RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen
# ENV LANG en_US.UTF-8
# ENV LANGUAGE en_US:en
# ENV LC_ALL en_US.UTF-8
# WORKDIR "/app"
# RUN chown nobody /app
# # set runner ENV
# ENV MIX_ENV="prod"
# # Only copy the final release from the build stage
# COPY --from=builder --chown=nobody:root /app/_build/${MIX_ENV}/rel/bright ./
# USER nobody
# # If using an environment that doesn't automatically reap zombie processes, it is
# # advised to add an init process such as tini via `apt-get install`
# # above and adding an entrypoint. See https://github.com/krallin/tini for details
# # ENTRYPOINT ["/tini", "--"]
# CMD ["/app/bin/server"]

View File

@ -0,0 +1,41 @@
## Important! Build context is the ROOT of the project.
## this keeps the door open for future possibility of shared code between pnpm workspace packages
# use the official Bun image
# see all versions at https://hub.docker.com/r/oven/bun/tags
FROM oven/bun:1 AS base
WORKDIR /usr/src/app
# install dependencies into temp directory
# this will cache them and speed up future builds
FROM base AS install
RUN mkdir -p /temp/dev
COPY ./services/htmx/package.json ./services/htmx/bun.lockb /temp/dev/
RUN cd /temp/dev && bun install --frozen-lockfile
# install with --production (exclude devDependencies)
RUN mkdir -p /temp/prod
COPY ./services/htmx/package.json ./services/htmx/bun.lockb /temp/prod/
RUN cd /temp/prod && bun install --frozen-lockfile --production
# copy node_modules from temp directory
# then copy all (non-ignored) project files into the image
FROM base AS prerelease
COPY --from=install /temp/dev/node_modules node_modules
# NOTE(review): this copies the whole repo root, so index.ts lands at
# ./services/htmx/index.ts — yet the release stage copies /usr/src/app/index.ts.
# Confirm the intended layout; one of the two paths looks wrong.
COPY . .
# [optional] tests & build
ENV NODE_ENV=production
RUN bun test
RUN bun run build
# copy production dependencies and source code into final image
FROM base AS release
COPY --from=install /temp/prod/node_modules node_modules
COPY --from=prerelease /usr/src/app/index.ts .
COPY --from=prerelease /usr/src/app/package.json .
# run the app as the unprivileged "bun" user on port 7991
USER bun
EXPOSE 7991/tcp
ENTRYPOINT [ "bun", "run", "index.ts" ]

View File

@ -0,0 +1,9 @@
## do-nothing scripts
### Motivation
See Dan Slimmon, "Do-nothing scripting: the key to gradual automation":
https://blog.danslimmon.com/2019/07/15/do-nothing-scripting-the-key-to-gradual-automation/
### Install the ABS programming language
These scripts are written in ABS. Install it with:

    bash <(curl https://www.abs-lang.org/installer.sh)

View File

@ -0,0 +1,43 @@
#! /usr/local/bin/abs
# Do-nothing script for manually publishing a VOD.
# Each step prints an instruction, then blocks on stdin until the
# operator presses Enter (gradual-automation style).
echo(" * Remux the .ts to .mp4 ")
echo(" [Press Enter When Complete...]")
# stdin() blocks for a line of input; the value is discarded.
_ = stdin()
echo(" * Temporarily serve the mp4 using `npx http-server ./serve` ")
echo(" [Press Enter When Complete...]")
_ = stdin()
# Upload/pin steps: thumbnail + three storage targets (Mux, IPFS, Backblaze).
echo(" * Generate a thumbnail ")
echo(" * Upload the .mp4 to Mux ")
echo(" * `IPFS add` the .mp4 ")
echo(" * Upload the .mp4 to Backblaze ")
echo(" [Press Enter When Complete...]")
_ = stdin()
# Strapi bookkeeping: register files/assets, then create and publish the VOD.
echo(" * Create a B2 File (.mp4) in Strapi")
echo(" * Create a B2 File (thumbnail) in Strapi")
echo(" * Create a Mux asset in Strapi")
echo(" * Create a VOD in Strapi")
echo(" * Publish VOD")
echo(" [Press Enter When Complete...]")
_ = stdin()
echo(" * Notify Discord of new VOD")
echo(" [Press Enter When Complete...]")
_ = stdin()
echo(" * Backup the database")
echo(" [Press Enter When Complete...]")
_ = stdin()

View File

@ -297,28 +297,99 @@ export interface IPlatformNotificationResponse {
}
/** A timestamped marker within a VOD. NOTE(review): time unit (seconds vs ms) is not evident here — confirm. */
export interface ITimestamp {
  id: number;
  time: number;
}
/** A content tag. */
export interface ITag {
  id: number;
  name: string;
}
/** A file stored in S3-compatible storage, addressable via both origin `url` and `cdn_url`. */
export interface IS3File {
  id: number;
  url: string;
  key: string;
  uploadId: string;
  cdn_url: string;
}
/** Single-entity response wrapper (`data` + `meta`) for an S3 file. */
export interface IS3FileResponse {
  data: IS3File;
  meta: IMeta;
}
/**
 * A video-on-demand record.
 *
 * Fix: the interface declared several members twice (`uuid`, `mux_asset`,
 * `vtuber`, `tag_vod_relations`, `timestamps`, `announce_title`,
 * `announce_url`, `uploader`), which is invalid TypeScript (duplicate
 * identifiers). The duplicates are merged below, keeping the most
 * specific type for each member.
 */
export interface IVod {
  id: number;
  uuid: string;
  stream?: IStream;
  published_at?: string;
  title?: string;
  duration?: number;
  date: string;
  date2: string;
  // NOTE(review): both `date2` and `date_2` exist — confirm whether one is legacy.
  date_2: string;
  cuid?: string;
  video240Hash?: string;
  videoSrcHash?: string;
  videoSrcB2?: any;
  mux_asset: IMuxAsset;
  thumbnail?: IS3File;
  vtuber: IVtuber;
  s3_file: IS3File;
  tag_vod_relations: ITagVodRelation[];
  timestamps: ITimestamp[];
  ipfs_cid: string;
  announce_title: string;
  announce_url: string;
  uploader: IUserResponse;
  note: string;
}
/** A site user account. */
export interface IUser {
  id: number;
  username: string;
  vanityLink?: string;
  image: string;
}
/** Single-entity response wrapper (`data` + `meta`) for a user. */
export interface IUserResponse {
  data: IUser;
  meta: IMeta;
}
/** Join record linking a tag to a VOD, with creator attribution. */
export interface ITagVodRelation {
  id: number;
  tag: ITag | IToyTag
  vod: IVod;
  creator_id: number;
  created_at: string;
}
/** A tag associated with a specific toy. */
export interface IToyTag extends ITag {
  toy: IToy;
}
/** A toy product referenced by tags. */
export interface IToy {
  id: number;
  tags: ITag[];
  linkTag: ITag[];
  make: string;
  model: string;
  aspectRatio: string;
  image2: string;
}
/** Props for the toys list component. NOTE(review): not exported — confirm that is intentional. */
interface IToysListProps {
  toys: IToy[];
  page: number;
  pageSize: number;
}
export interface IStream {
id: number;
date: string;

View File

@ -30,7 +30,7 @@ describe('image', function () {
describe('getStoryboard', function () {
this.timeout(1000*60*15)
it('should accept a URL and return a path to image on disk', async function () {
  // Fix: two `const url` declarations (stale diff remnant) caused a
  // redeclaration SyntaxError — keep only the current fixture.
  // NOTE(review): this points at a raw dev-host IP on :8081; the test is
  // now coupled to a transient machine — confirm a stable fixture URL.
  const url = 'http://38.242.193.246:8081/projektmelody-chaturbate-2025-01-09.mp4'
  const imagePath = await getStoryboard(url)
  expect(imagePath).to.match(/\.png/)
})

View File

@ -42,35 +42,33 @@ EOF
# --from-literal=b2Key=${UPPY_B2_KEY} \
# --from-literal=b2Secret=${UPPY_B2_SECRET}\
kubectl --namespace futureporn delete secret superstreamer --ignore-not-found
kubectl --namespace futureporn create secret generic superstreamer \
--from-literal=databaseUri=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/sprs \
--from-literal=s3Endpoint=${S3_ENDPOINT} \
--from-literal=s3Region=${S3_REGION} \
--from-literal=s3AccessKey=${S3_ACCESS_KEY_ID} \
--from-literal=s3SecretKey=${S3_SECRET_ACCESS_KEY} \
--from-literal=s3Bucket=${S3_BUCKET_NAME} \
--from-literal=publicS3Endpoint=${PUBLIC_S3_ENDPOINT} \
--from-literal=superSecret=${SUPER_SECRET} \
--from-literal=authToken=${SUPERSTREAMER_AUTH_TOKEN}
kubectl --namespace futureporn delete secret bright --ignore-not-found
kubectl --namespace futureporn create secret generic bright \
--from-literal=databaseUrl=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/bright \
--from-literal=secretKeyBase=${BRIGHT_SECRET_KEY_BASE}
kubectl --namespace futureporn delete secret next --ignore-not-found
kubectl --namespace futureporn create secret generic next \
--from-literal=nextAuthSecret=${NEXTAUTH_SECRET}
kubectl --namespace futureporn delete secret keycloak --ignore-not-found
kubectl --namespace futureporn create secret generic keycloak \
--from-literal=adminPassword=${KEYCLOAK_ADMIN_PASSWORD} \
--from-literal=clientId=${KEYCLOAK_CLIENT_ID} \
--from-literal=clientSecret=${KEYCLOAK_CLIENT_SECRET}
kubectl --namespace futureporn delete secret traefik-dashboard-auth --ignore-not-found
kubectl --namespace futureporn create secret generic traefik-dashboard-auth \
--type=kubernetes.io/basic-auth \
--from-literal=password=${TRAEFIK_DASHBOARD_PASSWORD} \
--from-literal=username=${TRAEFIK_DASHBOARD_USERNAME}
kubectl --namespace futureporn delete secret logto --ignore-not-found
kubectl --namespace futureporn create secret generic logto \
--from-literal=postgresqlUri=${LOGTO_POSTGRESQL_URI} \
--from-literal=cookieSecret=${LOGTO_COOKIE_SECRET} \
--from-literal=appSecret=${LOGTO_APP_SECRET} \
--from-literal=appId=${LOGTO_APP_ID}
kubectl --namespace futureporn delete secret supertokens --ignore-not-found
kubectl --namespace futureporn create secret generic supertokens \
--from-literal=apiKeys=${SUPERTOKENS_API_KEYS} \
--from-literal=apiKey=${SUPERTOKENS_API_KEY} \
--from-literal=postgresqlUri=${SUPERTOKENS_POSTGRESQL_URI}
kubectl --namespace futureporn delete secret patreon --ignore-not-found
kubectl --namespace futureporn create secret generic patreon \
--from-literal=creatorAccessToken=${PATREON_CREATOR_ACCESS_TOKEN} \
@ -78,16 +76,6 @@ kubectl --namespace futureporn create secret generic patreon \
--from-literal=clientId=${PATREON_CLIENT_ID} \
--from-literal=clientSecret=${PATREON_CLIENT_SECRET}
kubectl --namespace futureporn delete secret mariadb --ignore-not-found
kubectl --namespace futureporn create secret generic mariadb \
--from-literal=mariadb-root-password=${MARIADB_ROOT_PASSWORD} \
--from-literal=mariadb-password=${MARIADB_PASSWORD} \
--from-literal=mariadb-replication-password=${MARIADB_REPLICATION_PASSWORD}
kubectl --namespace futureporn delete secret externaldb --ignore-not-found
kubectl --namespace futureporn create secret generic externaldb \
--from-literal=db-password=${MARIADB_PASSWORD}
kubectl --namespace futureporn delete secret chisel --ignore-not-found
kubectl --namespace futureporn create secret generic chisel \
--from-literal=auth="${CHISEL_USERNAME}:${CHISEL_PASSWORD}"
@ -96,12 +84,6 @@ kubectl --namespace chisel-operator-system delete secret chisel --ignore-not-fou
kubectl --namespace chisel-operator-system create secret generic chisel \
--from-literal=auth="${CHISEL_USERNAME}:${CHISEL_PASSWORD}"
kubectl --namespace futureporn delete secret ngrok --ignore-not-found
kubectl --namespace futureporn create secret generic ngrok \
--from-literal=API_KEY=${NGROK_API_KEY} \
--from-literal=AUTHTOKEN=${NGROK_AUTHTOKEN} \
--from-literal=domain=${NGROK_DOMAIN}
kubectl --namespace futureporn delete secret bot --ignore-not-found
kubectl --namespace futureporn create secret generic bot \
--from-literal=automationUserJwt=${AUTOMATION_USER_JWT} \

View File

@ -1,4 +0,0 @@
#!/bin/bash
# Create the "keycloak" database on the primary Postgres pod.
# Fix: without set -e the trailing "Done." printed even when kubectl/psql
# failed; also quote the password so spaces/globs cannot break the command.
set -euo pipefail
kubectl -n futureporn exec postgresql-primary-0 -- env PGPASSWORD="${POSTGRES_PASSWORD}" psql -U postgres --command "CREATE DATABASE keycloak;"
echo "Done."

View File

@ -1,4 +0,0 @@
#!/bin/bash
# Create the "supertokens" database on the primary Postgres pod.
# Fix: without set -e the trailing "Done." printed even when kubectl/psql
# failed; also quote the password so spaces/globs cannot break the command.
set -euo pipefail
kubectl -n futureporn exec postgresql-primary-0 -- env PGPASSWORD="${POSTGRES_PASSWORD}" psql -U postgres --command "CREATE DATABASE supertokens;"
echo "Done."

View File

@ -1,7 +0,0 @@
/* Tailwind layers are intentionally disabled; Bulma is the active framework. */
/* @import "tailwindcss/base";
@import "tailwindcss/components";
@import "tailwindcss/utilities"; */
/* Bulma is bundled locally rather than fetched from a CDN. */
@import "./bulma.min.css";
/* This file is for your main application CSS */

View File

@ -1,4 +1,3 @@
/* This file is for your main application CSS */
// @import "./phoenix.css";
// "bulma" resolves from deps via the dart_sass --load-path (see config/config.exs).
@import "bulma";

View File

@ -21,11 +21,14 @@ import "phoenix_html"
import {Socket} from "phoenix"
import {LiveSocket} from "phoenix_live_view"
import topbar from "../vendor/topbar"
import Hooks from './hooks/index.js'
// Read the CSRF token injected by the server layout so LiveView
// connections are authenticated.
let csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content")
// Fix: the options object carried a duplicate `params:` key (stale diff
// remnant); a single entry carrying the CSRF token is kept, alongside the
// registered client hooks.
let liveSocket = new LiveSocket("/live", Socket, {
  longPollFallbackMs: 2500,
  params: {_csrf_token: csrfToken},
  hooks: Hooks
})
// Show progress bar on live navigation and form submits

View File

@ -0,0 +1,7 @@
// Registry of LiveView client hooks, handed to the LiveSocket constructor.
import VideojsHook from "./videojs_hook.js"

const Hooks = { VideojsHook }

export default Hooks

View File

@ -0,0 +1,47 @@
// @see https://hexdocs.pm/phoenix_live_view/js-interop.html#client-hooks-via-phx-hook
// LiveView client hook for the video.js player.
// @see https://hexdocs.pm/phoenix_live_view/js-interop.html#client-hooks-via-phx-hook
const VideojsHook = {
  mounted() {
    // Player bootstrap is currently disabled; the earlier videojs()
    // initialisation (source assignment, 'ended' listener, quality
    // selector) lives in version-control history.
  },

  beforeUpdate() {},

  updated() {
    console.log("VideojsHook updated");
  },

  destroyed() {},

  disconnected() {},

  reconnected() {}
}

export default VideojsHook

View File

@ -0,0 +1,97 @@
/*
Docs: https://hexdocs.pm/phoenix_live_view/js-interop.html#client-hooks
Usage: when using phx-hook, a unique DOM ID must always be set.
<div phx-hook="ExampleHook" id="someUniqueId"></div>
*/
// Earlier Vidstack-based attempts are retained below as reference:
// import 'vidstack/styles/defaults.css'
// import 'vidstack/styles/community-skin/video.css'
// import { defineCustomElements } from 'vidstack/elements';
// import { VidstackPlayer, VidstackPlayerLayout } from 'vidstack/global/player';
// import { VidstackPlayer } from 'vidstack'
// import { VidstackPlayer, VidstackPlayerLayout } from 'https://cdn.vidstack.io/player';
// import { HlsFacade } from "../../vendor/superstreamer-player.js"
// import "../../vendor/player.js"
const VidstackHook = {
  // This function runs when the element has been added to the DOM and its server LiveView has finished mounting
  mounted() {
    // defineCustomElements();
    // let currentEl = this.el;
    // // console.log("VidstackHook mounted");
    // player = document.querySelector('media-player');
    console.log('hello!')
    // NOTE(review): `Hls` is expected as a global (vendored hls.js); it is
    // not imported here — confirm the vendor bundle is loaded first.
    // NOTE(review): hard-coded test stream URL and a fixed #video element id.
    var video = document.getElementById('video');
    if (Hls.isSupported()) {
      var hls = new Hls({
        debug: true,
      });
      hls.loadSource('https://test-streams.mux.dev/x36xhzz/x36xhzz.m3u8');
      hls.attachMedia(video);
      // Autoplay only works muted in most browsers, hence muted = true.
      hls.on(Hls.Events.MEDIA_ATTACHED, function () {
        video.muted = true;
        video.play();
      });
    }
    // hls.js is not supported on platforms that do not have Media Source Extensions (MSE) enabled.
    // When the browser has built-in HLS support (check using `canPlayType`), we can provide an HLS manifest (i.e. .m3u8 URL) directly to the video element through the `src` property.
    // This is using the built-in support of the plain video element, without using hls.js.
    else if (video.canPlayType('application/vnd.apple.mpegurl')) {
      video.src = 'https://test-streams.mux.dev/x36xhzz/x36xhzz.m3u8';
      video.addEventListener('canplay', function () {
        video.play();
      });
    }
    // console.log(player);
    // console.log(defineCustomElements)
    // defineCustomElements()
    // VidstackPlayer.create({
    //   target: '#media-player',
    //   title: 'Sprite Fight',
    //   src: 'https://files.vidstack.io/sprite-fight/hls/stream.m3u8',
    //   poster: 'https://files.vidstack.io/sprite-fight/poster.webp',
    //   layout: new VidstackPlayerLayout({
    //     thumbnails: 'https://files.vidstack.io/sprite-fight/thumbnails.vtt',
    //   }),
    // });
  },
  // This function runs when the element is about to be updated in the DOM. Note: any call here must be synchronous as the operation cannot be deferred or cancelled.
  beforeUpdate() {},
  // This function runs when the element has been updated in the DOM by the server
  updated() {
    console.log("VidstackHook updated");
  },
  // This function runs when the element has been removed from the page, either by a parent update, or by the parent being removed entirely
  destroyed() {},
  // This function runs when the element's parent LiveView has disconnected from the server
  disconnected() {},
  // This function runs when the element's parent LiveView has reconnected to the server
  reconnected() {},
};
export default VidstackHook;

View File

@ -1,74 +0,0 @@
// See the Tailwind configuration guide for advanced usage
// https://tailwindcss.com/docs/configuration
// Tailwind config for the Phoenix assets pipeline: scans JS and .heex
// templates, adds LiveView loading-state variants, and embeds Heroicons.
const plugin = require("tailwindcss/plugin")
const fs = require("fs")
const path = require("path")
module.exports = {
  content: [
    "./js/**/*.js",
    "../lib/bright_web.ex",
    "../lib/bright_web/**/*.*ex"
  ],
  theme: {
    extend: {
      colors: {
        brand: "#FD4F00",
      }
    },
  },
  plugins: [
    require("@tailwindcss/forms"),
    // Allows prefixing tailwind classes with LiveView classes to add rules
    // only when LiveView classes are applied, for example:
    //
    //     <div class="phx-click-loading:animate-ping">
    //
    plugin(({addVariant}) => addVariant("phx-click-loading", [".phx-click-loading&", ".phx-click-loading &"])),
    plugin(({addVariant}) => addVariant("phx-submit-loading", [".phx-submit-loading&", ".phx-submit-loading &"])),
    plugin(({addVariant}) => addVariant("phx-change-loading", [".phx-change-loading&", ".phx-change-loading &"])),
    // Embeds Heroicons (https://heroicons.com) into your app.css bundle
    // See your `CoreComponents.icon/1` for more information.
    //
    plugin(function({matchComponents, theme}) {
      // Reads SVGs from the heroicons hex dependency at build time.
      let iconsDir = path.join(__dirname, "../deps/heroicons/optimized")
      let values = {}
      let icons = [
        ["", "/24/outline"],
        ["-solid", "/24/solid"],
        ["-mini", "/20/solid"],
        ["-micro", "/16/solid"]
      ]
      icons.forEach(([suffix, dir]) => {
        fs.readdirSync(path.join(iconsDir, dir)).forEach(file => {
          let name = path.basename(file, ".svg") + suffix
          values[name] = {name, fullPath: path.join(iconsDir, dir, file)}
        })
      })
      matchComponents({
        "hero": ({name, fullPath}) => {
          // Inline the SVG as a CSS mask so `currentColor` tints the icon.
          let content = fs.readFileSync(fullPath).toString().replace(/\r?\n|\r/g, "")
          let size = theme("spacing.6")
          if (name.endsWith("-mini")) {
            size = theme("spacing.5")
          } else if (name.endsWith("-micro")) {
            size = theme("spacing.4")
          }
          return {
            [`--hero-${name}`]: `url('data:image/svg+xml;utf8,${content}')`,
            "-webkit-mask": `var(--hero-${name})`,
            "mask": `var(--hero-${name})`,
            "mask-repeat": "no-repeat",
            "background-color": "currentColor",
            "vertical-align": "middle",
            "display": "inline-block",
            "width": size,
            "height": size
          }
        }
      }, {values})
    })
  ]
}

29370
services/bright/assets/vendor/hls.js vendored Normal file

File diff suppressed because it is too large Load Diff

1609
services/bright/assets/vendor/player.js vendored Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@ -23,7 +23,14 @@ config :bright, BrightWeb.Endpoint,
live_view: [signing_salt: "JGNufzrG"]
# Oban background-job runner: one :default queue (concurrency 10),
# weekly pruning of finished jobs, and rescue of jobs orphaned > 30 min.
config :bright, Oban,
  engine: Oban.Engines.Basic,
  queues: [default: 10],
  repo: Bright.Repo,
  plugins: [
    {Oban.Plugins.Pruner, max_age: 60 * 60 * 24 * 7},
    {Oban.Plugins.Lifeline, rescue_after: :timer.minutes(30)}
  ]
# Configures the mailer
@ -48,7 +55,7 @@ config :esbuild,
# Configure dart_sass, used for bulma
config :dart_sass,
  version: "1.61.0",
  # Profile renamed from :default to :bright; keep in sync with the
  # DartSass watcher reference in config/dev.exs.
  bright: [
    args: ~w(--load-path=../deps/bulma css:../priv/static/assets),
    cd: Path.expand("../assets", __DIR__)
  ]

View File

@ -27,7 +27,7 @@ config :bright, BrightWeb.Endpoint,
sass: {
DartSass,
:install_and_run,
[:default, ~w(--embed-source-map --source-map-urls=absolute --watch)]
[:bright, ~w(--embed-source-map --source-map-urls=absolute --watch)]
}
]
@ -66,6 +66,7 @@ config :bright, BrightWeb.Endpoint,
# Enable dev routes for dashboard and mailbox
config :bright, dev_routes: true
config :bright, superstreamer_api_client: Bright.Superstreamer.Client
# Do not include metadata nor timestamps in development logs
config :logger, :console, format: "[$level] $message\n"

View File

@ -1,5 +1,8 @@
import Config
# Only in tests, remove the complexity from the password hashing algorithm
config :bcrypt_elixir, :log_rounds, 1
# Configure the database
#
# The MIX_TEST_PARTITION environment variable can be used
@ -18,6 +21,12 @@ config :bright, BrightWeb.Endpoint,
secret_key_base: "#{System.get_env("SECRET_KEY_BASE")}",
server: false
# Prevent Oban from running jobs and plugins during test runs
config :bright, Oban, testing: :inline
# Have Superstreamer use mocks during testing
config :bright, superstreamer_api_client: ApiClientBehaviorMock
# In test we don't send emails
config :bright, Bright.Mailer, adapter: Swoosh.Adapters.Test

View File

@ -7,6 +7,7 @@ defmodule Bright.Application do
@impl true
def start(_type, _args) do
Oban.Telemetry.attach_default_logger(level: :debug)
children = [
BrightWeb.Telemetry,
Bright.Repo,
@ -14,10 +15,11 @@ defmodule Bright.Application do
{Phoenix.PubSub, name: Bright.PubSub},
# Start the Finch HTTP client for sending emails
{Finch, name: Bright.Finch},
{Oban, Application.fetch_env!(:bright, Oban)},
# Start a worker by calling: Bright.Worker.start_link(arg)
# {Bright.Worker, arg},
# Start to serve requests, typically the last entry
BrightWeb.Endpoint
BrightWeb.Endpoint,
]
# See https://hexdocs.pm/elixir/Supervisor.html

View File

@ -0,0 +1,146 @@
defmodule Bright.Jobs.CreateHlsPlaylist do
  @moduledoc """
  Oban worker that asks the Superstreamer API to transcode a VOD's source
  file and package it as an HLS playlist, then saves the resulting
  playlist URL on the `Vod` record.
  """

  use Oban.Worker, queue: :default, max_attempts: 3

  alias Bright.Repo
  alias Bright.Streams.Vod

  require Logger

  @impl Oban.Worker
  def perform(%Oban.Job{args: %{"vod_id" => vod_id, "input_url" => input_url}}) do
    vod = Repo.get!(Vod, vod_id)
    payload = build_payload(input_url)

    Logger.info("Starting transcoding for VOD ID #{vod_id}")

    # Fix: the package step must receive the asset id produced by the
    # finished transcode job — previously the transcode *job* id was passed.
    # Fix: poll_job_completion now actually returns an {:ok, _} tuple, so
    # these `with` clauses can match (before, a bare :ok fell through and
    # raised WithClauseError).
    with {:ok, transcode_job_id} <- start_transcode(payload),
         {:ok, asset_id} <- poll_job_completion(transcode_job_id),
         {:ok, package_job_id} <- start_package(asset_id),
         {:ok, _} <- poll_job_completion(package_job_id) do
      update_vod_with_playlist_url(vod, package_job_id)
      Logger.info("HLS playlist created and updated for VOD ID #{vod_id}")
    else
      {:error, reason} ->
        Logger.error("Failed to create HLS playlist for VOD ID #{vod_id}: #{inspect(reason)}")
        {:error, reason}
    end
  end

  # Transcode audio + video tracks from the same input, producing
  # 720p/144p H.264 renditions plus an AAC audio stream.
  defp build_payload(input_url) do
    %{
      "inputs" => [
        %{"type" => "audio", "path" => input_url, "language" => "eng"},
        %{"type" => "video", "path" => input_url}
      ],
      "streams" => [
        %{"type" => "video", "codec" => "h264", "height" => 720},
        %{"type" => "video", "codec" => "h264", "height" => 144},
        %{"type" => "audio", "codec" => "aac"}
      ],
      "tag" => "create_hls_playlist"
    }
  end

  # POST /transcode — returns {:ok, job_id} or a descriptive error tuple.
  defp start_transcode(payload) do
    Logger.info("Starting transcode with payload: #{inspect(payload)}")

    case HTTPoison.post("#{api_url()}/transcode", Jason.encode!(payload), auth_headers()) do
      {:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
        case Jason.decode(body) do
          {:ok, %{"jobId" => job_id}} -> {:ok, job_id}
          {:error, _} = error -> error
        end

      {:ok, %HTTPoison.Response{status_code: status, body: body}} ->
        {:error, %{status: status, body: body}}

      {:error, %HTTPoison.Error{reason: reason}} ->
        {:error, reason}
    end
  end

  # POST /package — previously an unimplemented stub that always returned
  # {:error, "missing implementation."}; now mirrors start_transcode/1.
  defp start_package(asset_id) do
    payload = %{
      "assetId" => asset_id,
      "concurrency" => 5,
      "public" => false
    }

    Logger.info("Starting packaging for asset ID #{asset_id}")

    case HTTPoison.post("#{api_url()}/package", Jason.encode!(payload), auth_headers()) do
      {:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
        case Jason.decode(body) do
          {:ok, %{"jobId" => job_id}} -> {:ok, job_id}
          {:error, _} = error -> error
        end

      {:ok, %HTTPoison.Response{status_code: status, body: body}} ->
        {:error, %{status: status, body: body}}

      {:error, %HTTPoison.Error{reason: reason}} ->
        {:error, reason}
    end
  end

  # Polls until the job reaches "completed". Returns {:ok, result} where
  # result is the job's assetId when one is reported, else the job id.
  defp poll_job_completion(job_id) do
    Logger.info("Polling job completion for Job ID #{job_id}")
    poll_interval = 5_000
    max_retries = 999

    Enum.reduce_while(1..max_retries, {:error, :timeout}, fn _, acc ->
      case get_job_status(job_id) do
        {:ok, %{"state" => "completed"} = job} ->
          Logger.info("Job ID #{job_id} completed successfully")
          {:halt, {:ok, extract_result(job, job_id)}}

        {:ok, _job} ->
          :timer.sleep(poll_interval)
          {:cont, acc}

        {:error, reason} ->
          Logger.error("Error polling job ID #{job_id}: #{inspect(reason)}")
          {:halt, {:error, reason}}
      end
    end)
  end

  # NOTE(review): assumes completed transcode jobs report their asset id in
  # JSON-encoded outputData (as the newer worker expects) — confirm against
  # the Superstreamer API.
  defp extract_result(%{"outputData" => output_data}, job_id) when is_binary(output_data) do
    case Jason.decode(output_data) do
      {:ok, %{"assetId" => asset_id}} -> asset_id
      _ -> job_id
    end
  end

  defp extract_result(_job, job_id), do: job_id

  # GET /jobs/:id — fix: the original built headers and returned them
  # without ever calling the API, so polling could never observe job state
  # (the caller's case raised CaseClauseError).
  defp get_job_status(job_id) do
    case HTTPoison.get("#{api_url()}/jobs/#{job_id}", auth_headers()) do
      {:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
        Jason.decode(body)

      {:ok, %HTTPoison.Response{status_code: status, body: body}} ->
        {:error, %{status: status, body: body}}

      {:error, %HTTPoison.Error{reason: reason}} ->
        {:error, reason}
    end
  end

  defp update_vod_with_playlist_url(vod, job_id) do
    playlist_url = generate_playlist_url(job_id)

    vod
    |> Ecto.Changeset.change(playlist_url: playlist_url)
    |> Repo.update!()
  end

  defp generate_playlist_url(job_id),
    do: "#{public_s3_endpoint()}/package/#{job_id}/hls/master.m3u8"

  # Environment is read at runtime rather than into module attributes,
  # which would freeze the values at compile time.
  defp api_url, do: System.get_env("SUPERSTREAMER_URL")
  defp public_s3_endpoint, do: System.get_env("PUBLIC_S3_ENDPOINT")

  defp auth_headers do
    [
      {"authorization", "Bearer #{System.get_env("SUPERSTREAMER_AUTH_TOKEN")}"},
      {"content-type", "application/json"}
    ]
  end
end

View File

@ -0,0 +1,138 @@
defmodule Bright.Jobs.CreateHlsPlaylist do
  @moduledoc """
  Oban worker: transcode a VOD via the Superstreamer API, package the
  resulting asset as HLS, and persist the playlist URL on the VOD.
  """

  alias Bright.Repo
  alias Bright.Streams.Vod

  use Oban.Worker,
    queue: :default,
    max_attempts: 3,
    tags: ["video", "vod"]

  require Logger

  @impl Oban.Worker
  # Fix: the body logged `job.id` but `job` was never bound (compile
  # error); bind the whole %Oban.Job{} alongside the args match.
  def perform(%Oban.Job{args: %{"vod_id" => vod_id, "input_url" => input_url}} = job) do
    Logger.info("Starting CreateHlsPlaylist job",
      job_id: job.id,
      vod_id: vod_id,
      input_url: input_url
    )

    with {:ok, transcode_job_id} <- start_transcode(input_url),
         {:ok, asset_id} <- wait_for_job(transcode_job_id),
         {:ok, package_job_id} <- start_package(asset_id),
         {:ok, _} <- wait_for_job(package_job_id) do
      update_vod_playlist_url(vod_id, package_job_id)
    end
  end

  # POST /transcode with 360p/144p H.264 renditions + AAC audio.
  # packageAfter: false — packaging is driven explicitly by this worker.
  defp start_transcode(input_url) do
    payload = %{
      inputs: [
        %{type: "video", path: input_url},
        %{type: "audio", path: input_url, language: "eng"}
      ],
      streams: [
        %{type: "video", codec: "h264", height: 360},
        %{type: "video", codec: "h264", height: 144},
        %{type: "audio", codec: "aac"}
      ],
      packageAfter: false
    }

    case HTTPoison.post("#{api_url()}/transcode", Jason.encode!(payload), headers()) do
      {:ok, %{status_code: 200, body: body}} ->
        %{"jobId" => job_id} = Jason.decode!(body)
        {:ok, job_id}

      error ->
        Logger.error("Failed to start transcode: #{inspect(error)}")
        {:error, :transcode_failed}
    end
  end

  # POST /package for the transcoded asset; returns {:ok, package_job_id}.
  defp start_package(asset_id) do
    payload = %{
      assetId: asset_id,
      concurrency: 5,
      public: false,
      name: "vod_#{asset_id}"
    }

    case HTTPoison.post("#{api_url()}/package", Jason.encode!(payload), headers()) do
      {:ok, %{status_code: 200, body: body}} ->
        %{"jobId" => job_id} = Jason.decode!(body)
        {:ok, job_id}

      error ->
        Logger.error("Failed to start package: #{inspect(error)}")
        {:error, :package_failed}
    end
  end

  # Blocks until the job finishes. Returns {:ok, asset_id} for transcode
  # jobs (assetId in outputData) or {:ok, job_id} otherwise.
  defp wait_for_job(job_id) do
    case poll_job_status(job_id) do
      {:ok, %{"state" => "completed", "outputData" => output_data}} when is_binary(output_data) ->
        case Jason.decode(output_data) do
          {:ok, %{"assetId" => asset_id}} -> {:ok, asset_id}
          # For package jobs, we just need the job_id
          _ -> {:ok, job_id}
        end

      # Robustness: completed jobs are not guaranteed to carry outputData;
      # previously this shape crashed into the catch-all error clause.
      {:ok, %{"state" => "completed"}} ->
        {:ok, job_id}

      {:ok, %{"state" => "failed", "stacktrace" => stacktrace}} ->
        Logger.error("Job failed: #{job_id}, stacktrace: #{inspect(stacktrace)}")
        {:error, :job_failed}

      error ->
        Logger.error("Error polling job status: #{inspect(error)}")
        {:error, :polling_failed}
    end
  end

  # Polls GET /jobs/:id every 5s until a terminal state or timeout.
  defp poll_job_status(job_id, attempts \\ 0) do
    # 30 minutes maximum (5 seconds * 360)
    if attempts >= 360 do
      {:error, :timeout}
    else
      case HTTPoison.get("#{api_url()}/jobs/#{job_id}", headers()) do
        {:ok, %{status_code: 200, body: body}} ->
          job = Jason.decode!(body)

          case job do
            %{"state" => state} when state in ["completed", "failed"] ->
              {:ok, job}

            _ ->
              # Wait 5 seconds before next poll
              Process.sleep(5000)
              poll_job_status(job_id, attempts + 1)
          end

        error ->
          Logger.error("Failed to poll job status: #{inspect(error)}")
          {:error, :polling_failed}
      end
    end
  end

  defp update_vod_playlist_url(vod_id, package_job_id) do
    playlist_url = generate_playlist_url(package_job_id)

    # NOTE(review): Vod.update_vod/2 is invoked on the schema module —
    # confirm the function lives there rather than in a context module.
    case Vod.update_vod(vod_id, %{playlist_url: playlist_url}) do
      {:ok, _vod} ->
        :ok

      error ->
        Logger.error("Failed to update VOD playlist URL: #{inspect(error)}")
        {:error, :update_failed}
    end
  end

  defp generate_playlist_url(job_id),
    do: "#{public_s3_endpoint()}/package/#{job_id}/hls/master.m3u8"

  # Read environment at runtime; module attributes would capture these
  # values at compile time.
  defp api_url, do: System.get_env("SUPERSTREAMER_API_URL")
  defp public_s3_endpoint, do: System.get_env("PUBLIC_S3_ENDPOINT")

  defp headers do
    [
      {"Authorization", "Bearer #{System.get_env("SUPERSTREAMER_AUTH_TOKEN")}"},
      {"Content-Type", "application/json"}
    ]
  end
end

View File

@ -0,0 +1,31 @@
defmodule Bright.Jobs.ProcessVod do
  @moduledoc """
  Oban worker that inspects a freshly touched VOD and, when it has a
  source input but no HLS playlist yet, enqueues a CreateHlsPlaylist job.
  """

  use Oban.Worker, queue: :default, max_attempts: 3

  require Logger

  alias Bright.Repo
  alias Bright.Streams.Vod
  alias Bright.Jobs.CreateHlsPlaylist

  @impl Oban.Worker
  def perform(%Oban.Job{args: %{"id" => id}} = job) do
    Logger.info("Performing job: #{inspect(job)}")

    vod = Repo.get!(Vod, id)

    # Only VODs that have an input file but no playlist need transcoding.
    if is_nil(vod.playlist_url) and not is_nil(vod.origin_temp_input_url) do
      enqueue_playlist_job(vod)
    end

    :ok
  end

  # Hands the VOD off to the HLS pipeline via a follow-up Oban job.
  defp enqueue_playlist_job(%Vod{id: id, origin_temp_input_url: url}) do
    %{vod_id: id, input_url: url}
    |> CreateHlsPlaylist.new()
    |> Oban.insert!()
  end
end

View File

@ -1,104 +0,0 @@
defmodule Bright.Platforms do
  @moduledoc """
  The Platforms context: CRUD operations for `Bright.Platforms.Platform`.
  """

  import Ecto.Query, warn: false

  alias Bright.Repo
  alias Bright.Platforms.Platform

  @doc """
  Returns the list of platforms.

  ## Examples

      iex> list_platforms()
      [%Platform{}, ...]

  """
  def list_platforms, do: Repo.all(Platform)

  @doc """
  Gets a single platform.

  Raises `Ecto.NoResultsError` if the Platform does not exist.

  ## Examples

      iex> get_platform!(123)
      %Platform{}

  """
  def get_platform!(id), do: Repo.get!(Platform, id)

  @doc """
  Creates a platform from `attrs`.

  Returns `{:ok, %Platform{}}` on success or `{:error, %Ecto.Changeset{}}`
  when validation fails.
  """
  def create_platform(attrs \\ %{}) do
    Repo.insert(Platform.changeset(%Platform{}, attrs))
  end

  @doc """
  Updates `platform` with `attrs`.

  Returns `{:ok, %Platform{}}` on success or `{:error, %Ecto.Changeset{}}`
  when validation fails.
  """
  def update_platform(%Platform{} = platform, attrs) do
    Repo.update(Platform.changeset(platform, attrs))
  end

  @doc """
  Deletes a platform, returning `{:ok, %Platform{}}` or
  `{:error, %Ecto.Changeset{}}`.
  """
  def delete_platform(%Platform{} = platform), do: Repo.delete(platform)

  @doc """
  Returns an `%Ecto.Changeset{}` for tracking platform changes.
  """
  def change_platform(%Platform{} = platform, attrs \\ %{}) do
    Platform.changeset(platform, attrs)
  end
end

View File

@ -0,0 +1,79 @@
defmodule Bright.Platforms.PlatformNotifier do
  @moduledoc """
  Email notifications for platform accounts — confirmation, password
  reset and email change — delivered through `Bright.Mailer`.
  """

  import Swoosh.Email

  alias Bright.Mailer

  # Builds and delivers a plain-text email. Returns {:ok, email} on
  # success; a mailer failure is returned unchanged.
  defp deliver(recipient, subject, body) do
    email =
      new()
      |> to(recipient)
      |> from({"Bright", "contact@example.com"})
      |> subject(subject)
      |> text_body(body)

    case Mailer.deliver(email) do
      {:ok, _metadata} -> {:ok, email}
      error -> error
    end
  end

  @doc """
  Deliver instructions to confirm account.
  """
  def deliver_confirmation_instructions(platform, url) do
    deliver(platform.email, "Confirmation instructions", """
    ==============================
    Hi #{platform.email},
    You can confirm your account by visiting the URL below:
    #{url}
    If you didn't create an account with us, please ignore this.
    ==============================
    """)
  end

  @doc """
  Deliver instructions to reset a platform password.
  """
  def deliver_reset_password_instructions(platform, url) do
    deliver(platform.email, "Reset password instructions", """
    ==============================
    Hi #{platform.email},
    You can reset your password by visiting the URL below:
    #{url}
    If you didn't request this change, please ignore this.
    ==============================
    """)
  end

  @doc """
  Deliver instructions to update a platform email.
  """
  def deliver_update_email_instructions(platform, url) do
    deliver(platform.email, "Update email instructions", """
    ==============================
    Hi #{platform.email},
    You can change your email by visiting the URL below:
    #{url}
    If you didn't request this change, please ignore this.
    ==============================
    """)
  end
end

View File

@ -0,0 +1,179 @@
defmodule Bright.Platforms.PlatformToken do
use Ecto.Schema
import Ecto.Query
alias Bright.Platforms.PlatformToken
@hash_algorithm :sha256
@rand_size 32
# It is very important to keep the reset password token expiry short,
# since someone with access to the email may take over the account.
@reset_password_validity_in_days 1
@confirm_validity_in_days 7
@change_email_validity_in_days 7
@session_validity_in_days 60
schema "platforms_tokens" do
field :token, :binary
field :context, :string
field :sent_to, :string
belongs_to :platform, Bright.Platforms.Platform
timestamps(type: :utc_datetime, updated_at: false)
end
@doc """
Generates a token that will be stored in a signed place,
such as session or cookie. As they are signed, those
tokens do not need to be hashed.
The reason why we store session tokens in the database, even
though Phoenix already provides a session cookie, is because
Phoenix' default session cookies are not persisted, they are
simply signed and potentially encrypted. This means they are
valid indefinitely, unless you change the signing/encryption
salt.
Therefore, storing them allows individual platform
sessions to be expired. The token system can also be extended
to store additional data, such as the device used for logging in.
You could then use this information to display all valid sessions
and devices in the UI and allow users to explicitly expire any
session they deem invalid.
"""
def build_session_token(platform) do
token = :crypto.strong_rand_bytes(@rand_size)
{token, %PlatformToken{token: token, context: "session", platform_id: platform.id}}
end
@doc """
Checks if the token is valid and returns its underlying lookup query.
The query returns the platform found by the token, if any.
The token is valid if it matches the value in the database and it has
not expired (after @session_validity_in_days).
"""
def verify_session_token_query(token) do
query =
from token in by_token_and_context_query(token, "session"),
join: platform in assoc(token, :platform),
where: token.inserted_at > ago(@session_validity_in_days, "day"),
select: platform
{:ok, query}
end
@doc """
Builds a token to be delivered to the platform's email for `context`.

Returns `{encoded_token, %PlatformToken{}}`. The base64url-encoded raw
token is what gets emailed, while only its hash is persisted — so
read-only database access is not enough to forge a usable token. Tokens
are also tied to the address they were sent to, and stop verifying if the
platform's email changes afterwards.

The delivery mechanism is pluggable: callers can adapt this to other
channels (e.g. phone numbers) without changing the token scheme.
"""
def build_email_token(platform, context) do
  build_hashed_token(platform, context, platform.email)
end
# Produces {url-safe encoded raw token, PlatformToken row carrying only the
# sha256 digest of that token}. The raw value never touches the database.
defp build_hashed_token(platform, context, sent_to) do
  raw = :crypto.strong_rand_bytes(@rand_size)
  digest = :crypto.hash(@hash_algorithm, raw)
  encoded = Base.url_encode64(raw, padding: false)

  record = %PlatformToken{
    token: digest,
    context: context,
    sent_to: sent_to,
    platform_id: platform.id
  }

  {encoded, record}
end
@doc """
Verifies an email token and returns its platform-lookup query.

Returns `{:ok, query}` selecting the owning platform, or `:error` when the
token is not valid base64url. The query only matches when the stored hash
equals the hash of the decoded token, the row is inside the validity window
for `context` (supported contexts: "confirm" and "reset_password" — see
`days_for_context/1`), and the platform's current email still equals the
address the token was sent to. For "change:" contexts use
`verify_change_email_token_query/2` instead.
"""
def verify_email_token_query(token, context) do
  # `with` returns the bare `:error` from url_decode64 unchanged on failure,
  # matching the original case/2 branches.
  with {:ok, decoded} <- Base.url_decode64(token, padding: false) do
    hashed = :crypto.hash(@hash_algorithm, decoded)
    validity_days = days_for_context(context)

    query =
      from pt in by_token_and_context_query(hashed, context),
        join: platform in assoc(pt, :platform),
        where: pt.inserted_at > ago(^validity_days, "day") and pt.sent_to == platform.email,
        select: platform

    {:ok, query}
  end
end
# Validity window (in days) for each email-token context; any other
# context raises FunctionClauseError, which is intentional — unknown
# contexts are a programmer error.
defp days_for_context("confirm"), do: @confirm_validity_in_days
defp days_for_context("reset_password"), do: @reset_password_validity_in_days
@doc """
Verifies a "change:"-prefixed email-change token and returns its lookup
query.

Returns `{:ok, query}` for the matching, unexpired token row, or `:error`
when the token is not valid base64url. Unlike `verify_email_token_query/2`
it deliberately does NOT require `sent_to` to match the platform's current
email — the email change itself is what is being confirmed. Tokens expire
after `@change_email_validity_in_days` days.
"""
def verify_change_email_token_query(token, "change:" <> _ = context) do
  # `with` passes the bare `:error` from url_decode64 straight through,
  # exactly as the original case expression did.
  with {:ok, decoded} <- Base.url_decode64(token, padding: false) do
    hashed = :crypto.hash(@hash_algorithm, decoded)

    query =
      from pt in by_token_and_context_query(hashed, context),
        where: pt.inserted_at > ago(@change_email_validity_in_days, "day")

    {:ok, query}
  end
end
@doc """
Returns a query for the token row matching the given token value and
context.
"""
def by_token_and_context_query(token, context) do
  # Expression-form where; compiles to the same filter as the keyword form.
  from pt in PlatformToken, where: pt.token == ^token and pt.context == ^context
end
@doc """
Returns a query for all of the platform's tokens, either across every
context (`:all`) or restricted to a non-empty list of contexts.
"""
def by_platform_and_contexts_query(platform, :all) do
  from pt in PlatformToken, where: pt.platform_id == ^platform.id
end

def by_platform_and_contexts_query(platform, [_ | _] = contexts) do
  from pt in PlatformToken,
    where: pt.platform_id == ^platform.id and pt.context in ^contexts
end
end

View File

@ -196,7 +196,29 @@ defmodule Bright.Streams do
%Vod{}
|> Vod.changeset(attrs)
|> Repo.insert()
|> case do
{:ok, vod} ->
maybe_enqueue_process_vod(vod)
{:ok, vod}
{:error, changeset} ->
{:error, changeset}
end
end
# Enqueues a ProcessVod Oban job when the vod has an origin_temp_input_url
# to ingest; otherwise does nothing. Always returns the vod unchanged so it
# can sit in the middle of a pipeline.
defp maybe_enqueue_process_vod(%Vod{id: vod_id, origin_temp_input_url: input_url} = vod) do
  if input_url do
    Oban.insert(Bright.Jobs.ProcessVod.new(%{id: vod_id, origin_temp_input_url: input_url}))
  end

  vod
end
@doc """
Updates a vod.

View File

@ -3,6 +3,8 @@ defmodule Bright.Streams.Vod do
import Ecto.Changeset
schema "vods" do
field :origin_temp_input_url, :string
field :playlist_url, :string
field :s3_cdn_url, :string
field :s3_upload_id, :string
field :s3_key, :string
@ -21,9 +23,10 @@ defmodule Bright.Streams.Vod do
@doc false
# Casts the permitted vod fields and requires only the owning stream.
# Fix: the source carried two consecutive `cast` calls (an old narrower
# field list immediately followed by the new superset) — a diff artifact;
# the first cast was redundant since every field it permitted is included
# in the remaining list, so it is dropped here.
def changeset(vod, attrs) do
  vod
  |> cast(attrs, [:s3_cdn_url, :s3_upload_id, :s3_key, :s3_bucket, :mux_asset_id, :mux_playback_id, :ipfs_cid, :torrent, :stream_id, :origin_temp_input_url, :playlist_url])
  |> validate_required([:stream_id])
end
end

View File

@ -0,0 +1,19 @@
defmodule Bright.Superstreamer.ApiClient do
  # Facade for the Superstreamer API.
  #
  # NOTE(review): the behaviour below declares create_transcode/2,
  # create_package/2 and create_pipeline/1, but the stubs in this module
  # are arity 0 with empty bodies, so the @behaviour contract is not
  # satisfied as written — confirm whether these are placeholders awaiting
  # implementation.
  @behaviour Bright.Superstreamer.ApiClientBehaviour

  # Implementation module resolved from application config at runtime
  # (presumably so a mock can be injected in tests — verify against callers).
  def api_client, do: Application.get_env(:bright, :superstreamer_api_client)

  # Stub: no body yet; returns nil.
  def create_transcode() do
  end

  # Stub: no body yet; returns nil.
  def create_package() do
  end

  # Stub: no body yet; returns nil.
  def create_pipeline() do
  end
end

View File

@ -0,0 +1,6 @@
defmodule Bright.Superstreamer.ApiClientBehaviour do
  @moduledoc false

  # Contract implemented by Superstreamer API clients (see ApiClient).
  # NOTE(review): the meaning of each String.t() argument is not visible
  # here — confirm against the implementing module before relying on it.
  @callback create_transcode(String.t(), String.t()) :: tuple()
  @callback create_package(String.t(), String.t()) :: tuple()
  @callback create_pipeline(String.t()) :: tuple()
end

View File

@ -0,0 +1,32 @@
defmodule Bright.Superstreamer.HttpAdapter do
  @moduledoc """
  HTTPoison-based HTTP client for the Superstreamer API.

  Reads the base URL and access token from application config at runtime
  (`:superstreamer_base_url` / `:superstreamer_token`), prefixes request
  paths with the base URL, sends JSON with auth headers, and decodes JSON
  response bodies.
  """
  use HTTPoison.Base

  defp access_token, do: Application.get_env(:bright, :superstreamer_token)

  def base_url, do: Application.get_env(:bright, :superstreamer_base_url)

  # HTTPoison.Base callback: prepend the configured base URL to every path.
  def process_url(url) do
    base_url() <> url
  end

  @doc """
  POSTs `params` (JSON-encoded) to `url` with auth headers.
  """
  def post(url, params) do
    post(url, Jason.encode!(params), headers())
  end

  @doc """
  GETs `url` with auth headers.
  """
  def get(url) do
    get(url, headers())
  end

  defp headers do
    [
      {"Authorization", "token #{access_token()}"},
      # Fix: the Accept value previously carried a stray leading space
      # (" application/json"), which strict servers can reject.
      {"Accept", "application/json"},
      {"Content-Type", "application/json"}
    ]
  end

  # HTTPoison.Base callback: empty bodies pass through untouched.
  def process_response_body(""), do: ""

  def process_response_body(body) do
    # NOTE(review): Jason.decode! raises on non-JSON bodies (e.g. HTML
    # error pages) — confirm the API always responds with JSON.
    Jason.decode!(body)
  end
end

Some files were not shown because too many files have changed in this diff Show More