add tailscale

CJ_Clippy 2025-10-07 20:17:41 -08:00
parent 8ef71691e0
commit f1695d1d8d
38 changed files with 86 additions and 1621 deletions


@ -1,13 +0,0 @@
---
- name: Mount vfs
ansible.posix.mount:
src: "{{ vfs_mount_tag }}"
path: /mnt/vfs
fstype: virtiofs
state: mounted
- name: Restart bright
community.docker.docker_container:
name: bright
state: started
restart: true
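
For reference, with state: mounted the Mount vfs handler both mounts the share and persists it to /etc/fstab; the resulting entry looks roughly like this (the mount tag comes from the Vultr API at run time, so the value below is only illustrative):

vfs_bright_tag    /mnt/vfs    virtiofs    defaults    0    0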


@ -1,85 +0,0 @@
---
# Terraform Vultr provider doesn't have a VFS resource/datasource yet.
# This is a workaround for that missing feature.
#
# @see https://github.com/vultr/terraform-provider-vultr/issues/560
- name: Get the VFS id
ansible.builtin.uri:
url: https://api.vultr.com/v2/vfs
method: GET
status_code: 200
headers:
Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
register: vfs_list
- name: Get VFS variables
ansible.builtin.set_fact:
bright_vfs_id: "{{ vfs_list.json.vfs | selectattr('label', 'equalto', 'bright') | map(attribute='id') | first }}"
- name: Debug the bright VFS id
ansible.builtin.debug:
msg: "The VFS ID for 'bright' is {{ bright_vfs_id }}"
- name: Attach VFS to Vultr instance
ansible.builtin.uri:
url: https://api.vultr.com/v2/vfs/{{ bright_vfs_id }}/attachments/{{ hostvars[inventory_hostname]['vultr_instance_id'] }}
method: PUT
status_code:
- 200
- 201
- 409
headers:
Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
register: vfs_attach
changed_when:
- vfs_attach.json is defined
- "'state' in vfs_attach.json"
- vfs_attach.json.state == "ATTACHED"
notify:
- Mount vfs
- Restart bright
- name: Debug vfs_attach
ansible.builtin.debug:
var: vfs_attach
- name: Get the VFS mount_tag
ansible.builtin.set_fact:
vfs_mount_tag: "{{ vfs_attach.json.mount_tag | default('') }}"
- name: Setup docker container
community.docker.docker_container:
name: bright
image: gitea.futureporn.net/futureporn/bright:latest
pull: always
state: started
ports:
- "4000:4000"
volumes:
- "/mnt/vfs/futureporn:/mnt/vfs/futureporn"
env:
DB_HOST: "{{ hostvars['fp-db-0']['internal_ip'] }}"
DB_USER: "{{ lookup('dotenv', 'DB_USER', file='../.env') }}"
DB_NAME: "bright"
DB_PORT: "5432"
DB_PASS: "{{ lookup('dotenv', 'DB_PASS', file='../.env') }}"
MIX_ENV: prod
PUBLIC_S3_ENDPOINT: https://futureporn-b2.b-cdn.net
PATREON_REDIRECT_URI: https://bright.futureporn.net/auth/patreon/callback
SITE_URL: https://bright.futureporn.net
PHX_HOST: bright.futureporn.net
AWS_BUCKET: futureporn
AWS_REGION: us-west-000
AWS_HOST: s3.us-west-000.backblazeb2.com
SECRET_KEY_BASE: "{{ lookup('dotenv', 'SECRET_KEY_BASE', file='../.env') }}"
PATREON_CLIENT_SECRET: "{{ lookup('dotenv', 'PATREON_CLIENT_SECRET', file='../.env') }}"
PATREON_CLIENT_ID: "{{ lookup('dotenv', 'PATREON_CLIENT_ID', file='../.env') }}"
AWS_ACCESS_KEY_ID: "{{ lookup('dotenv', 'AWS_ACCESS_KEY_ID', file='../.env') }}"
AWS_SECRET_ACCESS_KEY: "{{ lookup('dotenv', 'AWS_SECRET_ACCESS_KEY', file='../.env') }}"
TRACKER_HELPER_ACCESSLIST_URL: https://tracker.futureporn.net/accesslist
TRACKER_HELPER_USERNAME: "{{ lookup('dotenv', 'TRACKER_HELPER_USERNAME', file='../.env') }}"
TRACKER_HELPER_PASSWORD: "{{ lookup('dotenv', 'TRACKER_HELPER_PASSWORD', file='../.env') }}"
TRACKER_URL: https://tracker.futureporn.net:6969
CACHE_DIR: /mnt/vfs/futureporn # we use Vultr File System to share cache among all Phoenix instances


@ -1,3 +0,0 @@
---
coolify_dir: /opt/coolify


@ -1,25 +0,0 @@
---
- name: Install Coolify using official installer
ansible.builtin.shell: |
curl -fsSL https://cdn.coollabs.io/coolify/install.sh | sudo bash
args:
creates: /coolify/docker-compose.yml # adjust if needed to prevent reruns
# @note securely connect to coolify webui using SSH tunneling.
# ssh -L 8000:localhost:8000 root@our
# @see https://coolify.io/docs/knowledge-base/server/firewall
- name: Allow UFW ports
community.general.ufw:
rule: allow
port: "{{ item }}"
proto: tcp
loop:
- 8000 # coolify UI
- 6001 # real-time comms
- 6002 # terminal
- 80
- 443


@ -1,179 +0,0 @@
---
- name: Install apt packages
ansible.builtin.apt:
name:
- python3-psycopg2
update_cache: yes
state: present
- name: Ensure required packages are installed
apt:
name:
- curl
- ca-certificates
- postgresql-common
state: present
update_cache: yes
- name: Create directory for PostgreSQL apt key
file:
path: /usr/share/postgresql-common/pgdg
state: directory
mode: '0755'
- name: Download PostgreSQL signing key
get_url:
url: https://www.postgresql.org/media/keys/ACCC4CF8.asc
dest: /usr/share/postgresql-common/pgdg/apt.postgresql.org.asc
mode: '0644'
- name: Add PostgreSQL APT repository
apt_repository:
repo: "deb [signed-by=/usr/share/postgresql-common/pgdg/apt.postgresql.org.asc] https://apt.postgresql.org/pub/repos/apt {{ ansible_distribution_release }}-pgdg main"
filename: "pgdg"
state: present
update_cache: yes
- name: Update apt cache
apt:
update_cache: yes
- name: Install PostgreSQL 16
apt:
name: postgresql-16
state: present
- name: Start postgres
ansible.builtin.systemd_service:
name: postgresql@16-main
state: started
enabled: true
- name: Create a new database with name "future_porn"
community.postgresql.postgresql_db:
name: future_porn
become: true
become_user: postgres
- name: Create postgres user and set password
community.postgresql.postgresql_user:
name: postgres
password: "{{ lookup('dotenv', 'DB_PASSWORD', file='../../../.env.production') }}"
expires: infinity
become: true
become_user: postgres
- name: Grant privs
community.postgresql.postgresql_privs:
login_db: "{{ lookup('dotenv', 'DB_NAME', file='../../../.env.production') }}"
roles: "{{ lookup('dotenv', 'DB_USER', file='../../../.env.production') }}"
type: schema
objs: public
privs: ALL
become: true
become_user: postgres
- name: Allow access on Vultr VPC subnet
community.postgresql.postgresql_pg_hba:
dest: /etc/postgresql/16/main/pg_hba.conf
contype: host
users: all
databases: all
method: scram-sha-256
source: 10.2.112.0/20
keep_comments_at_rules: true
comment: "Vultr VPC"
- name: set listen_addresses
community.postgresql.postgresql_alter_system:
param: listen_addresses
value: '*'
become: true
become_user: postgres
- name: set port
community.postgresql.postgresql_alter_system:
param: port
value: '5432'
become: true
become_user: postgres
# - name: Setup volume
# community.docker.docker_volume:
# name: pg_data
# - name: Setup docker container
# community.docker.docker_container:
# name: postgres
# image: postgres:16
# pull: missing
# state: started
# ports:
# - "0.0.0.0:5432:5432"
# env:
# POSTGRES_USER: "{{ lookup('dotenv', 'DB_USER', file='../../../../.env.production') }}"
# POSTGRES_DB: "{{ lookup('dotenv', 'DB_PASSWORD', file='../../../../.env.production') }}"
# POSTGRES_PASSWORD: "{{ lookup('dotenv', 'DB_PASSWORD', file='../../../../.env.production') }}"
# mounts:
# - type: volume
# target: "/var/lib/postgresql/data"
# source: "pg_data"
- name: Set default UFW policy to deny incoming
community.general.ufw:
state: enabled
policy: deny
direction: incoming
- name: Allow /20 subnet access for VPC
community.general.ufw:
rule: allow
port: '5432'
proto: tcp
from: 10.2.112.0/20
# sudo ufw default deny incoming
# - name: start pgweb
# community.docker.docker_container:
# name: pgweb
# image: sosedoff/pgweb:latest
# pull: missing
# state: started
# ports:
# - "8091:8091"
# env:
# POSTGRES_USER: "postgres"
# POSTGRES_DB: "our"
# POSTGRES_PASSWORD: "{{ lookup('dotenv', 'DB_PASSWORD', file='../../../../.env.production') }}"
# PGWEB_DATABASE_URL: "postgres://{{lookup('dotenv', 'DB_USER', file='../../../../.env.production'):{{lookup('dotenv', 'DB_PASSWORD', file='../../../../.env.production')@postgres:}}}}"
# mounts:
# - type: volume
# target: "/var/lib/postgresql/data"
# source: "pg_data"
# pgweb:
# container_name: out-pgweb
# image: sosedoff/pgweb
# depends_on:
# postgres:
# condition: service_healthy
# environment:
# PGWEB_DATABASE_URL: postgres://${DB_USER}:${DB_PASSWORD}@postgres:5432/${DB_NAME}?sslmode=disable
# ports:
# - "8091:8081"
# healthcheck:
# test: ["CMD", "curl", "-f", "http://localhost:8081"]
# interval: 10s
# retries: 5
# start_period: 10s
# timeout: 10s
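
For reference, the postgresql_pg_hba task above renders roughly this rule in /etc/postgresql/16/main/pg_hba.conf, which is what allows hosts on the Vultr VPC subnet to authenticate with a password:

host    all    all    10.2.112.0/20    scram-sha-256    # Vultr VPC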


@ -1,3 +0,0 @@
---
infisical_caddy_image: caddy:2
infisical_docker_tag: latest-postgres


@ -1,13 +0,0 @@
---
- name: Restart caddy
community.docker.docker_container:
name: infisical-caddy-1
image: "{{ infisical_caddy_image }}"
state: started
restart: true
- name: Restart infisical
community.docker.docker_compose_v2:
project_src: /opt/infisical
state: restarted


@ -1,45 +0,0 @@
---
- name: Ensure infisical directory exists
ansible.builtin.file:
path: /opt/infisical
state: directory
mode: "0755"
- name: Generate .env file
ansible.builtin.template:
src: env.j2
dest: /opt/infisical/.env
mode: "0600"
- name: Install passlib
ansible.builtin.pip:
name: passlib # dependency of Ansible's passwordhash
state: present
- name: Template Caddyfile
ansible.builtin.template:
src: Caddyfile.j2
dest: /opt/infisical/Caddyfile
mode: "0600"
notify:
- Restart caddy
- name: Template Docker Compose file
ansible.builtin.template:
src: docker-compose.yml.j2
dest: /opt/infisical/docker-compose.yml
mode: "0644"
- name: Start up docker-compose.yml
community.docker.docker_compose_v2:
project_src: /opt/infisical
state: present
- name: Configure firewall
community.general.ufw:
rule: allow
port: "{{ item }}"
proto: tcp
loop:
- 443


@ -1,13 +0,0 @@
infisical.futureporn.net {
# basic_auth {
# {{ lookup('dotenv', 'INFISICAL_BASIC_AUTH_USERNAME', file='../../../../.env')}} {{ lookup('dotenv', 'INFISICAL_BASIC_AUTH_PASSWORD', file='../../../../.env') | password_hash('bcrypt') }}
# }
reverse_proxy infisical-backend:8080 {
health_uri /
health_interval 10s
health_timeout 5s
}
}
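
If the commented basic_auth block is re-enabled, the bcrypt hash can also be generated with Caddy's own helper instead of the Ansible password_hash filter; a minimal sketch (the plaintext value is a placeholder):

caddy hash-password --plaintext 'changeme'

The printed hash is what goes after the username inside the basic_auth block.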


@ -1,86 +0,0 @@
x-logging: &default-logging
driver: "json-file"
options:
max-size: "${LOG_MAX_SIZE:-20m}"
max-file: "${LOG_MAX_FILE:-10}"
compress: "true"
services:
caddy:
image: {{ infisical_caddy_image }}
restart: unless-stopped
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile
- caddy_data:/data
ports:
- 443:443
environment:
- BASE_URL=infisical.futureporn.net
logging: *default-logging
networks:
- infisical
backend:
container_name: infisical-backend
image: infisical/infisical:{{ infisical_docker_tag }}
restart: unless-stopped
depends_on:
db:
condition: service_healthy
redis:
condition: service_started
db-migration:
condition: service_completed_successfully
pull_policy: always
env_file: .env
environment:
- NODE_ENV=production
ports:
- 80:8080
networks:
- infisical
redis:
container_name: infisical-redis
image: redis
restart: unless-stopped
env_file: .env
environment:
- ALLOW_EMPTY_PASSWORD=yes
volumes:
- ./volumes/redis:/data
networks:
- infisical
db:
container_name: infisical-db
image: postgres:14-alpine
restart: unless-stopped
env_file: .env
volumes:
- ./volumes/postgres:/var/lib/postgresql/data
networks:
- infisical
healthcheck:
test: "pg_isready --username=${POSTGRES_USER} && psql --username=${POSTGRES_USER} --list"
interval: 5s
timeout: 10s
retries: 10
db-migration:
container_name: infisical-db-migration
depends_on:
db:
condition: service_healthy
image: infisical/infisical:{{ infisical_docker_tag }}
env_file: .env
command: npm run migration:latest
pull_policy: always
networks:
- infisical
networks:
infisical:
volumes:
caddy_data: null


@ -1,23 +0,0 @@
# Website URL
SITE_URL=https://infisical.futureporn.net
# Keys
# Required key for platform encryption/decryption ops
# GENERATE YOUR OWN KEY WITH `openssl rand -hex 16`
ENCRYPTION_KEY={{ lookup('dotenv', 'INFISICAL_ENCRYPTION_KEY', file='../../../../.env') }}
# JWT
# Required secrets to sign JWT tokens
# GENERATE YOUR OWN KEY WITH `openssl rand -base64 32`
AUTH_SECRET={{ lookup('dotenv', 'INFISICAL_AUTH_SECRET', file='../../../../.env') }}
# Postgres
POSTGRES_PASSWORD={{ lookup('dotenv', 'INFISICAL_POSTGRES_PASSWORD', file='../../../../.env') }}
POSTGRES_USER=infisical
POSTGRES_DB=infisical
# Do not change the next line
DB_CONNECTION_URI=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
# Redis
# Do not change the next line
REDIS_URL=redis://redis:6379


@ -1,29 +0,0 @@
---
- name: Ensure komodo directory exists
ansible.builtin.file:
path: /opt/komodo
state: directory
mode: "0755"
- name: Get docker compose file
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/moghtech/komodo/main/compose/ferretdb.compose.yaml
dest: /opt/komodo/ferretdb.compose.yaml
mode: "0644"
- name: Get .env file
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/moghtech/komodo/main/compose/compose.env
dest: /opt/komodo/compose.env
mode: "0644"
# Pin the settings called out at the top of compose.env (the values below are the upstream placeholders; replace the DB credentials and passkey with real secrets).
- name: Set config
ansible.builtin.lineinfile:
path: /opt/komodo/compose.env
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
loop:
- { key: KOMODO_DISABLE_USER_REGISTRATION, value: "true" }
- { key: KOMODO_ENABLE_NEW_USERS, value: "false" }
- { key: KOMODO_DISABLE_NON_ADMIN_CREATE, value: "true" }
- { key: KOMODO_HOST, value: "https://komodo.future.porn" }
- { key: KOMODO_DB_USERNAME, value: "admin" }
- { key: KOMODO_DB_PASSWORD, value: "admin" }
- { key: KOMODO_PASSKEY, value: "a_random_passkey" }
- name: Run Komodo core
community.docker.docker_compose_v2:
project_src: /opt/komodo
files: [ ferretdb.compose.yaml ]
env_files: [ compose.env ]


@ -1,119 +0,0 @@
# KOMODO_DISABLE_USER_REGISTRATION=true
# KOMODO_ENABLE_NEW_USERS=false
# KOMODO_DISABLE_NON_ADMIN_CREATE=true
# KOMODO_HOST=https://komodo.future.porn
# KOMODO_DB_USERNAME=admin
# KOMODO_DB_PASSWORD=admin
# KOMODO_PASSKEY=a_random_passkey
####################################
# 🦎 KOMODO COMPOSE - VARIABLES 🦎 #
####################################
## These compose variables can be used with all Komodo deployment options.
## Pass these variables to the compose up command using `--env-file komodo/compose.env`.
## Additionally, they are passed to both Komodo Core and Komodo Periphery with `env_file: ./compose.env`,
## so you can pass any additional environment variables to Core / Periphery directly in this file as well.
## Stick to a specific version, or use `latest`
COMPOSE_KOMODO_IMAGE_TAG=latest
## DB credentials
KOMODO_DB_USERNAME=admin
KOMODO_DB_PASSWORD=admin
## Configure a secure passkey to authenticate between Core / Periphery.
KOMODO_PASSKEY=a_random_passkey
## Set your time zone for schedules
## https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TZ=Etc/UTC
#=-------------------------=#
#= Komodo Core Environment =#
#=-------------------------=#
## Full variable list + descriptions are available here:
## 🦎 https://github.com/moghtech/komodo/blob/main/config/core.config.toml 🦎
## Note. Secret variables also support `${VARIABLE}_FILE` syntax to pass docker compose secrets.
## Docs: https://docs.docker.com/compose/how-tos/use-secrets/#examples
## Used for Oauth / Webhook url suggestion / Caddy reverse proxy.
KOMODO_HOST="{{ komodo_host }}"
## Displayed in the browser tab.
KOMODO_TITLE=fp Komodo
## Create a server matching this address as the "first server".
## Use `https://host.docker.internal:8120` when using systemd-managed Periphery.
KOMODO_FIRST_SERVER=https://periphery:8120
## Make all buttons just double-click, rather than the full confirmation dialog.
KOMODO_DISABLE_CONFIRM_DIALOG=false
## Rate at which Komodo polls your servers for
## status / container status / system stats / alerting.
## Options: 1-sec, 5-sec, 15-sec, 1-min, 5-min, 15-min
## Default: 15-sec
KOMODO_MONITORING_INTERVAL="15-sec"
## Interval at which to poll Resources for any updates / automated actions.
## Options: 15-min, 1-hr, 2-hr, 6-hr, 12-hr, 1-day
## Default: 1-hr
KOMODO_RESOURCE_POLL_INTERVAL="1-hr"
## Used to auth incoming webhooks. Alt: KOMODO_WEBHOOK_SECRET_FILE
KOMODO_WEBHOOK_SECRET=a_random_secret
## Used to generate jwt. Alt: KOMODO_JWT_SECRET_FILE
KOMODO_JWT_SECRET=a_random_jwt_secret
## Time to live for jwt tokens.
## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk
KOMODO_JWT_TTL="1-day"
## Enable login with username + password.
KOMODO_LOCAL_AUTH=true
## Disable new user signups.
KOMODO_DISABLE_USER_REGISTRATION=false
## All new logins are auto enabled
KOMODO_ENABLE_NEW_USERS=false
## Disable non-admins from creating new resources.
KOMODO_DISABLE_NON_ADMIN_CREATE=false
## Allows all users to have Read level access to all resources.
KOMODO_TRANSPARENT_MODE=false
## Prettier logging with empty lines between logs
KOMODO_LOGGING_PRETTY=false
## More human readable logging of startup config (multi-line)
KOMODO_PRETTY_STARTUP_CONFIG=false
## OIDC Login
KOMODO_OIDC_ENABLED=false
## Must be reachable from the Komodo Core container
# KOMODO_OIDC_PROVIDER=https://oidc.provider.internal/application/o/komodo
## Change the host to one reachable by users (optional if it is the same as above).
## DO NOT include the `path` part of the URL.
# KOMODO_OIDC_REDIRECT_HOST=https://oidc.provider.external
## Your OIDC client id
# KOMODO_OIDC_CLIENT_ID= # Alt: KOMODO_OIDC_CLIENT_ID_FILE
## Your OIDC client secret.
## If your provider supports PKCE flow, this can be omitted.
# KOMODO_OIDC_CLIENT_SECRET= # Alt: KOMODO_OIDC_CLIENT_SECRET_FILE
## Make usernames the full email.
## Note. This does not work for all OIDC providers.
# KOMODO_OIDC_USE_FULL_EMAIL=true
## Add additional trusted audiences for token claims verification.
## Supports comma separated list, and passing with _FILE (for compose secrets).
# KOMODO_OIDC_ADDITIONAL_AUDIENCES=abc,123 # Alt: KOMODO_OIDC_ADDITIONAL_AUDIENCES_FILE
## Github Oauth
KOMODO_GITHUB_OAUTH_ENABLED=false
# KOMODO_GITHUB_OAUTH_ID= # Alt: KOMODO_GITHUB_OAUTH_ID_FILE
# KOMODO_GITHUB_OAUTH_SECRET= # Alt: KOMODO_GITHUB_OAUTH_SECRET_FILE
## Google Oauth
KOMODO_GOOGLE_OAUTH_ENABLED=false
# KOMODO_GOOGLE_OAUTH_ID= # Alt: KOMODO_GOOGLE_OAUTH_ID_FILE
# KOMODO_GOOGLE_OAUTH_SECRET= # Alt: KOMODO_GOOGLE_OAUTH_SECRET_FILE
## Aws - Used to launch Builder instances.
KOMODO_AWS_ACCESS_KEY_ID= # Alt: KOMODO_AWS_ACCESS_KEY_ID_FILE
KOMODO_AWS_SECRET_ACCESS_KEY= # Alt: KOMODO_AWS_SECRET_ACCESS_KEY_FILE
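
As noted above, the secret variables also accept a ${VARIABLE}_FILE form so they can be supplied through docker compose secrets instead of plain environment values; a minimal sketch (the secret name and file path are illustrative):

services:
  core:
    secrets:
      - komodo_passkey
    environment:
      KOMODO_PASSKEY_FILE: /run/secrets/komodo_passkey
secrets:
  komodo_passkey:
    file: ./secrets/komodo_passkey.txt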


@ -1,95 +0,0 @@
###################################
# 🦎 KOMODO COMPOSE - FERRETDB 🦎 #
###################################
## This compose file will deploy:
## 1. Postgres + FerretDB Mongo adapter (https://www.ferretdb.com)
## 2. Komodo Core
## 3. Komodo Periphery
services:
postgres:
# Recommended: Pin to a specific version
# https://github.com/FerretDB/documentdb/pkgs/container/postgres-documentdb
image: ghcr.io/ferretdb/postgres-documentdb
labels:
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
restart: unless-stopped
# ports:
# - 5432:5432
volumes:
- postgres-data:/var/lib/postgresql/data
environment:
POSTGRES_USER: ${KOMODO_DB_USERNAME}
POSTGRES_PASSWORD: ${KOMODO_DB_PASSWORD}
POSTGRES_DB: postgres
ferretdb:
# Recommended: Pin to a specific version
# https://github.com/FerretDB/FerretDB/pkgs/container/ferretdb
image: ghcr.io/ferretdb/ferretdb
labels:
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
restart: unless-stopped
depends_on:
- postgres
# ports:
# - 27017:27017
volumes:
- ferretdb-state:/state
environment:
FERRETDB_POSTGRESQL_URL: postgres://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@postgres:5432/postgres
core:
image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
labels:
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
restart: unless-stopped
depends_on:
- ferretdb
ports:
- 9120:9120
env_file: ./compose.env
environment:
KOMODO_DATABASE_ADDRESS: ferretdb:27017
KOMODO_DATABASE_USERNAME: ${KOMODO_DB_USERNAME}
KOMODO_DATABASE_PASSWORD: ${KOMODO_DB_PASSWORD}
volumes:
## Core cache for repos for latest commit hash / contents
- repo-cache:/repo-cache
## Store sync files on server
# - /path/to/syncs:/syncs
## Optionally mount a custom core.config.toml
# - /path/to/core.config.toml:/config/config.toml
## Allows for systemd Periphery connection at
## "http://host.docker.internal:8120"
# extra_hosts:
# - host.docker.internal:host-gateway
## Deploy Periphery container using this block,
## or deploy the Periphery binary with systemd using
## https://github.com/moghtech/komodo/tree/main/scripts
periphery:
image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
labels:
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
restart: unless-stopped
env_file: ./compose.env
volumes:
## Mount external docker socket
- /var/run/docker.sock:/var/run/docker.sock
## Allow Periphery to see processes outside of container
- /proc:/proc
## Specify the Periphery agent root directory.
## Must be the same inside and outside the container,
## or docker will get confused. See https://github.com/moghtech/komodo/discussions/180.
## Default: /etc/komodo.
- ${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}:${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}
volumes:
# Postgres
postgres-data:
# FerretDB
ferretdb-state:
# Core
repo-cache:


@ -1,3 +0,0 @@
---
our_caddy_image: caddy:2


@ -1,8 +0,0 @@
---
- name: Restart app
ansible.builtin.systemd_service:
name: our-server
state: restarted
enabled: true
daemon_reload: true


@ -1,198 +0,0 @@
---
- name: Create futureporn group
ansible.builtin.group:
name: futureporn
state: present
- name: Create futureporn user
ansible.builtin.user:
name: futureporn
group: futureporn
create_home: true
home: /home/futureporn
system: true
- name: Ensure futureporn directory exists
ansible.builtin.file:
path: /opt/futureporn
state: directory
mode: "0755"
notify:
- Restart app
- name: Ensure config directory exists
ansible.builtin.file:
path: /usr/local/etc/futureporn/our
state: directory
mode: "0755"
notify:
- Restart app
- name: Generate .env file
ansible.builtin.template:
src: env.j2
dest: "{{ env_file }}"
mode: "0600"
notify:
- Restart app
- name: Download Futureporn source code
ansible.builtin.git:
repo: https://gitea.futureporn.net/futureporn/fp
dest: /opt/futureporn
version: "{{ our_commit }}"
update: true
tags:
- our
notify:
- Restart app
- name: Install Our packages based on package.json
community.general.npm:
path: "{{ app_dir }}"
- name: Install passlib
ansible.builtin.pip:
name: passlib # dependency of Ansible's passwordhash
state: present
- name: Create our-server service
ansible.builtin.template:
src: our-server.service.j2
dest: /etc/systemd/system/our-server.service
mode: "0644"
notify:
- Restart app
# - name: Template Caddyfile
# ansible.builtin.template:
# src: Caddyfile.j2
# dest: /opt/our/Caddyfile
# mode: "0600"
# notify:
# - Restart caddy
# - name: Template Docker Compose file
# ansible.builtin.template:
# src: docker-compose.yml.j2
# dest: /opt/our/docker-compose.yml
# mode: "0644"
# notify:
# - Restart app
- name: Set default UFW policy to deny incoming
community.general.ufw:
state: enabled
policy: deny
direction: incoming
- name: Configure firewall
community.general.ufw:
rule: allow
port: "{{ item }}"
proto: tcp
loop:
- 443
- 80
- name: Allow /20 subnet access
community.general.ufw:
rule: allow
port: "{{ item }}"
proto: tcp
from: 10.2.112.0/20
loop:
- 3000
# Bright app Reference
# ---
# # Terraform Vultr provider doesn't have a VFS resource/datasource yet.
# # This is a workaround for that missing feature.
# #
# # @see https://github.com/vultr/terraform-provider-vultr/issues/560
# - name: Get the VFS id
# ansible.builtin.uri:
# url: https://api.vultr.com/v2/vfs
# method: GET
# status_code: 200
# headers:
# Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
# register: vfs_list
# - name: Get VFS variables
# ansible.builtin.set_fact:
# our_vfs_id: "{{ vfs_list.json.vfs | selectattr('label', 'equalto', 'our') | map(attribute='id') | first }}"
# - name: Debug the our VFS id
# ansible.builtin.debug:
# msg: "The VFS ID for 'our' is {{ our_vfs_id }}"
# - name: Attach VFS to Vultr instance
# ansible.builtin.uri:
# url: https://api.vultr.com/v2/vfs/{{ our_vfs_id }}/attachments/{{ hostvars[inventory_hostname]['vultr_instance_id'] }}
# method: PUT
# status_code:
# - 200
# - 201
# - 409
# headers:
# Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
# register: vfs_attach
# changed_when:
# - vfs_attach.json is defined
# - "'state' in vfs_attach.json"
# - vfs_attach.json.state == "ATTACHED"
# notify:
# - Mount vfs
# - Restart our
# - name: Debug vfs_attach
# ansible.builtin.debug:
# var: vfs_attach
# - name: Get the VFS mount_tag
# ansible.builtin.set_fact:
# vfs_mount_tag: "{{ vfs_attach.json.mount_tag | default('') }}"
# - name: Setup docker container
# community.docker.docker_container:
# name: our
# image: gitea.futureporn.net/futureporn/our:latest
# pull: always
# state: started
# ports:
# - "4000:4000"
# volumes:
# - "/mnt/vfs/futureporn:/mnt/vfs/futureporn"
# env:
# DB_HOST: "{{ hostvars['fp-db-0']['internal_ip'] }}"
# DB_USER: "{{ lookup('dotenv', 'DB_USER', file='../.env') }}"
# DB_NAME: "our"
# DB_PORT: "5432"
# DB_PASS: "{{ lookup('dotenv', 'DB_PASS', file='../.env') }}"
# MIX_ENV: prod
# PUBLIC_S3_ENDPOINT: https://futureporn-b2.b-cdn.net
# PATREON_REDIRECT_URI: https://our.futureporn.net/auth/patreon/callback
# SITE_URL: https://our.futureporn.net
# PHX_HOST: our.futureporn.net
# AWS_BUCKET: futureporn
# AWS_REGION: us-west-000
# AWS_HOST: s3.us-west-000.backblazeb2.com
# SECRET_KEY_BASE: "{{ lookup('dotenv', 'SECRET_KEY_BASE', file='../.env') }}"
# PATREON_CLIENT_SECRET: "{{ lookup('dotenv', 'PATREON_CLIENT_SECRET', file='../.env') }}"
# PATREON_CLIENT_ID: "{{ lookup('dotenv', 'PATREON_CLIENT_ID', file='../.env') }}"
# AWS_ACCESS_KEY_ID: "{{ lookup('dotenv', 'AWS_ACCESS_KEY_ID', file='../.env') }}"
# AWS_SECRET_ACCESS_KEY: "{{ lookup('dotenv', 'AWS_SECRET_ACCESS_KEY', file='../.env') }}"
# TRACKER_HELPER_ACCESSLIST_URL: https://tracker.futureporn.net/accesslist
# TRACKER_HELPER_USERNAME: "{{ lookup('dotenv', 'TRACKER_HELPER_USERNAME', file='../.env') }}"
# TRACKER_HELPER_PASSWORD: "{{ lookup('dotenv', 'TRACKER_HELPER_PASSWORD', file='../.env') }}"
# TRACKER_URL: https://tracker.futureporn.net:6969
# CACHE_DIR: /mnt/vfs/futureporn # we use Vultr File System to share cache among all Phoenix instances


@ -1,6 +0,0 @@
---
- name: Setup fastify app
ansible.builtin.include_tasks: fastify.yml
tags:
- fastify


@ -1,33 +0,0 @@
ORIGIN=https://future.porn
COOKIE_SECRET={{ lookup('dotenv', 'COOKIE_SECRET', file='../../../../.env.production')}}
DB_USER={{ lookup('dotenv', 'DB_USER', file='../../../../.env.production')}}
DB_PASSWORD={{ lookup('dotenv', 'DB_PASSWORD', file='../../../../.env.production')}}
DB_NAME=future_porn
CDN_ORIGIN=https://fp-usc.b-cdn.net
CDN_TOKEN_SECRET={{ lookup('dotenv', 'CDN_TOKEN_SECRET', file='../../../../.env.production')}}
NODE_ENV=production
DATABASE_URL={{ lookup('dotenv', 'DATABASE_URL', file='../../../../.env.production')}}
PGADMIN_DEFAULT_EMAIL={{ lookup('dotenv', 'PGADMIN_DEFAULT_EMAIL', file='../../../../.env.production')}}
PGADMIN_DEFAULT_PASSWORD={{ lookup('dotenv', 'PGADMIN_DEFAULT_PASSWORD', file='../../../../.env.production')}}
PATREON_CLIENT_ID={{ lookup('dotenv', 'PATREON_CLIENT_ID', file='../../../../.env.production')}}
PATREON_CLIENT_SECRET={{ lookup('dotenv', 'PATREON_CLIENT_SECRET', file='../../../../.env.production')}}
PATREON_API_ORIGIN=https://www.patreon.com
PATREON_AUTHORIZE_PATH=/oauth2/authorize
PATREON_TOKEN_PATH=/api/oauth2/token
S3_BUCKET=fp-usc
S3_REGION=us-west-000
S3_KEY_ID={{ lookup('dotenv', 'S3_KEY_ID', file='../../../../.env.production')}}
S3_APPLICATION_KEY={{ lookup('dotenv', 'S3_APPLICATION_KEY', file='../../../../.env.production')}}
S3_ENDPOINT=https://s3.us-west-000.backblazeb2.com
CACHE_ROOT='/mnt/vfs/futureporn/our'


@ -1,18 +0,0 @@
[Unit]
Description=FuturePorn Our Server
After=network.target
[Service]
Type=simple
WorkingDirectory={{ app_dir }}
ExecStart=/usr/bin/env /usr/bin/npx tsx src/index.ts
#ExecStart=/usr/bin/env /usr/bin/npx dotenvx run -f {{ env_file }} -- npx tsx src/index.ts
Restart=always
RestartSec=5
User={{ app_user }}
EnvironmentFile={{ env_file }}
[Install]
WantedBy=multi-user.target
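
Once the role has templated this unit, it can be brought up and inspected by hand like this (the role's handler performs the daemon-reload, enable, and restart automatically):

systemctl daemon-reload
systemctl enable --now our-server
journalctl -u our-server -f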


@ -1,16 +0,0 @@
[Unit]
Description=FuturePorn Our Worker
After=network.target
[Service]
Type=simple
WorkingDirectory={{ app_dir }}
ExecStart=/usr/bin/env NODE_ENV=production /usr/bin/node dist/worker.js
Restart=on-failure
User={{ app_user }}
EnvironmentFile={{ env_file }}
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target


@ -1,7 +0,0 @@
---
app_user: futureporn
app_dir: /opt/futureporn/services/our
app_entry: dist/main.js
env_file: /usr/local/etc/futureporn/our/env
nodejs_version: "20.x"
our_commit: main


@ -1,2 +0,0 @@
---


@ -1,8 +0,0 @@
---
- name: Restart worker
ansible.builtin.systemd_service:
name: our-worker
state: restarted
enabled: true
daemon_reload: true


@ -1,75 +0,0 @@
---
- name: Create futureporn group
ansible.builtin.group:
name: futureporn
state: present
- name: Create futureporn user
ansible.builtin.user:
name: futureporn
group: futureporn
create_home: true
home: /home/futureporn
system: true
- name: Ensure futureporn directory exists
ansible.builtin.file:
path: /opt/futureporn
state: directory
mode: "0755"
notify:
- Restart worker
- name: Ensure config directory exists
ansible.builtin.file:
path: /usr/local/etc/futureporn/our
state: directory
mode: "0755"
notify:
- Restart worker
- name: Generate .env file
ansible.builtin.template:
src: env.j2
dest: "{{ env_file }}"
mode: "0600"
notify:
- Restart worker
- name: Download Futureporn source code
ansible.builtin.git:
repo: https://gitea.futureporn.net/futureporn/fp
dest: /opt/futureporn
version: "{{ our_commit }}"
update: true
tags:
- our
notify:
- Restart worker
- name: Install Our packages based on package.json
community.general.npm:
path: "{{ app_dir }}"
- name: Install passlib
ansible.builtin.pip:
name: passlib # dependency of Ansible's passwordhash
state: present
- name: Create our-worker service
ansible.builtin.template:
src: our-worker.service.j2
dest: /etc/systemd/system/our-worker.service
mode: "0644"
notify:
- Restart worker
- name: Set default UFW policy to deny incoming
community.general.ufw:
state: enabled
policy: deny
direction: incoming


@ -1,30 +0,0 @@
ORIGIN=https://future.porn
COOKIE_SECRET={{ lookup('dotenv', 'COOKIE_SECRET', file='../../../../.env.production')}}
DB_USER={{ lookup('dotenv', 'DB_USER', file='../../../../.env.production')}}
DB_PASSWORD={{ lookup('dotenv', 'DB_PASSWORD', file='../../../../.env.production')}}
DB_NAME=future_porn
CDN_ORIGIN=https://fp-usc.b-cdn.net
CDN_TOKEN_SECRET={{ lookup('dotenv', 'CDN_TOKEN_SECRET', file='../../../../.env.production')}}
NODE_ENV=production
DATABASE_URL={{ lookup('dotenv', 'DATABASE_URL', file='../../../../.env.production')}}
PGADMIN_DEFAULT_EMAIL={{ lookup('dotenv', 'PGADMIN_DEFAULT_EMAIL', file='../../../../.env.production')}}
PGADMIN_DEFAULT_PASSWORD={{ lookup('dotenv', 'PGADMIN_DEFAULT_PASSWORD', file='../../../../.env.production')}}
PATREON_CLIENT_ID={{ lookup('dotenv', 'PATREON_CLIENT_ID', file='../../../../.env.production')}}
PATREON_CLIENT_SECRET={{ lookup('dotenv', 'PATREON_CLIENT_SECRET', file='../../../../.env.production')}}
PATREON_API_ORIGIN=https://www.patreon.com
PATREON_AUTHORIZE_PATH=/oauth2/authorize
PATREON_TOKEN_PATH=/api/oauth2/token
S3_BUCKET=fp-usc
S3_REGION=us-west-000
S3_KEY_ID={{ lookup('dotenv', 'S3_KEY_ID', file='../../../../.env.production') }}
S3_APPLICATION_KEY={{ lookup('dotenv', 'S3_APPLICATION_KEY', file='../../../../.env.production')}}
S3_ENDPOINT=https://s3.us-west-000.backblazeb2.com
CACHE_ROOT='/mnt/vfs/futureporn/our'


@ -1,14 +0,0 @@
[Unit]
Description=FuturePorn Our Worker
After=network.target
[Service]
Type=simple
WorkingDirectory={{ app_dir }}
ExecStart=/usr/bin/env /usr/bin/npx tsx src/worker.ts
Restart=on-failure
User={{ app_user }}
EnvironmentFile={{ env_file }}
[Install]
WantedBy=multi-user.target


@ -1,7 +0,0 @@
---
app_user: futureporn
app_dir: /opt/futureporn/services/our
app_entry: src/worker.ts
env_file: /usr/local/etc/futureporn/our/env
nodejs_version: "20.x"
our_commit: main


@ -1,218 +0,0 @@
#!/bin/bash
set -e # Stop script execution on error
NGINX_CONF_PATH="./docker/nginx/active_backend.conf"
NGINX_CONTAINER="app"
ENV_FILE=".env"
build_containers() {
echo "📦 Building Docker containers..."
docker compose build
echo "✅ Docker containers built successfully."
}
prepare_nginx_config() {
if [ ! -d "./docker/nginx" ]; then
echo "📂 Nginx directory not found. Creating it..."
mkdir -p ./docker/nginx
echo "✅ Nginx directory created."
fi
}
update_nginx_config() {
local active_color=$1
echo "🔄 Updating Nginx configuration to route traffic to '$active_color' containers..."
cat > "$NGINX_CONF_PATH" <<EOL
upstream app_backend {
server $active_color:9000 max_fails=3 fail_timeout=30s;
}
EOL
echo "📋 Copying Nginx configuration to the container..."
docker cp "$NGINX_CONF_PATH" "$NGINX_CONTAINER:/etc/nginx/conf.d/active_backend.conf"
echo "🔁 Reloading Nginx to apply the new configuration..."
docker exec "$NGINX_CONTAINER" nginx -s reload >/dev/null 2>&1
echo "✅ Nginx configuration updated and reloaded successfully."
}
wait_for_health() {
local container_prefix=$1
local retries=5
local unhealthy_found
echo "⏳ Waiting for containers with prefix '$container_prefix' to become healthy..."
while (( retries > 0 )); do
unhealthy_found=false
for container_name in $(docker ps --filter "name=$container_prefix" --format "{{.Names}}"); do
health_status=$(docker inspect --format '{{if .State.Health}}{{.State.Health.Status}}{{else}}unknown{{end}}' "$container_name" || echo "unknown")
if [[ "$health_status" != "healthy" ]]; then
unhealthy_found=true
echo "🚧 Container '$container_name' is not ready. Current status: $health_status."
fi
done
if ! $unhealthy_found; then
echo "✅ All containers with prefix '$container_prefix' are healthy."
return 0
fi
echo "⏳ Retrying... ($retries retries left)"
((retries--))
sleep 5
done
echo "❌ Error: Some containers with prefix '$container_prefix' are not healthy. Aborting deployment."
rollback
exit 1
}
rollback() {
echo "🛑 Rolling back deployment. Ensuring the active environment remains intact."
if [ -n "$PREVIOUS_COLOR" ]; then
echo "🔄 Restoring CONTAINER_COLOR=$PREVIOUS_COLOR in .env."
sed -i.bak "s/^CONTAINER_COLOR=.*/CONTAINER_COLOR=$PREVIOUS_COLOR/" "$ENV_FILE"
rm -f "$ENV_FILE.bak"
echo "✅ Restored CONTAINER_COLOR=$PREVIOUS_COLOR in .env."
else
echo "🚧 No previous CONTAINER_COLOR found to restore."
fi
if docker ps --filter "name=green" --format "{{.Names}}" | grep -q "green"; then
echo "✅ Active environment 'green' remains intact."
echo "🛑 Stopping and removing 'blue' containers..."
docker compose stop "blue" >/dev/null 2>&1 || true
docker compose rm -f "blue" >/dev/null 2>&1 || true
elif docker ps --filter "name=blue" --format "{{.Names}}" | grep -q "blue"; then
echo "✅ Active environment 'blue' remains intact."
echo "🛑 Stopping and removing 'green' containers..."
docker compose stop "green" >/dev/null 2>&1 || true
docker compose rm -f "green" >/dev/null 2>&1 || true
else
echo "❌ No active environment detected after rollback. Manual intervention might be needed."
fi
echo "🔄 Rollback completed."
}
update_env_file() {
local active_color=$1
# check if .env file exists
if [ ! -f "$ENV_FILE" ]; then
echo "❌ .env file not found. Creating a new one..."
echo "CONTAINER_COLOR=$active_color" > "$ENV_FILE"
echo "✅ Created .env file with CONTAINER_COLOR=$active_color."
return
fi
# backup previous CONTAINER_COLOR value
if grep -q "^CONTAINER_COLOR=" "$ENV_FILE"; then
PREVIOUS_COLOR=$(grep "^CONTAINER_COLOR=" "$ENV_FILE" | cut -d '=' -f 2)
echo "♻️ Backing up previous CONTAINER_COLOR=$PREVIOUS_COLOR."
else
PREVIOUS_COLOR=""
fi
# update CONTAINER_COLOR value in .env
if grep -q "^CONTAINER_COLOR=" "$ENV_FILE"; then
sed -i.bak "s/^CONTAINER_COLOR=.*/CONTAINER_COLOR=$active_color/" "$ENV_FILE"
echo "🔄 Updated CONTAINER_COLOR=$active_color in .env"
else
echo "CONTAINER_COLOR=$active_color" >> "$ENV_FILE"
echo "🖋️ Added CONTAINER_COLOR=$active_color to .env"
fi
# remove backup file
if [ -f "$ENV_FILE.bak" ]; then
rm "$ENV_FILE.bak"
fi
}
install_dependencies() {
local container=$1
echo "📥 Installing dependencies in container '$container'..."
# Install Laravel dependencies
docker exec -u root -it "$container" bash -c "composer install --no-dev --optimize-autoloader"
docker exec -u root -it "$container" bash -c "mkdir -p database && touch database/database.sqlite"
# Permissions setup
docker exec -u root -it "$container" bash -c "chown www-data:www-data -R ./storage ./bootstrap ./database"
docker exec -u root -it "$container" bash -c "chmod -R 775 ./storage ./bootstrap/cache"
# Clear caches and run migrations
docker exec -u root -it "$container" bash -c "php artisan cache:clear"
docker exec -u root -it "$container" bash -c "php artisan config:clear"
docker exec -u root -it "$container" bash -c "php artisan route:clear"
docker exec -u root -it "$container" bash -c "php artisan view:clear"
docker exec -u root -it "$container" bash -c "php artisan migrate --force"
echo "✅ Dependencies installed and database initialized successfully in container '$container'."
}
deploy() {
local active=$1
local new=$2
# Update .env before deploying
update_env_file "$new"
echo "🚀 Starting deployment. Current active environment: '$active'. Deploying to '$new'..."
docker compose --profile "$new" up -d
wait_for_health "$new"
install_dependencies "$new"
update_nginx_config "$new"
echo "🗑️ Removing old environment: '$active'..."
echo "🛑 Stopping '$active' containers..."
docker compose stop $active >/dev/null 2>&1 || true
echo "🗑️ Removing '$active' containers..."
docker compose rm -f $active >/dev/null 2>&1 || true
update_env_file "$new"
echo "✅ Deployment to '$new' completed successfully."
}
get_active_container() {
if [ -f "$ENV_FILE" ] && grep -q "CONTAINER_COLOR" "$ENV_FILE"; then
grep "CONTAINER_COLOR" "$ENV_FILE" | cut -d '=' -f 2
else
echo ""
fi
}
# Main script logic
prepare_nginx_config
build_containers
ACTIVE_COLOR=$(get_active_container)
if [ -z "$ACTIVE_COLOR" ]; then
# if no active container found, deploy 'blue'
echo "🟦 Initial setup. Bringing up 'blue' containers..."
docker compose --profile blue up -d
wait_for_health "blue"
install_dependencies "blue"
update_nginx_config "blue"
update_env_file "blue"
elif [ "$ACTIVE_COLOR" == "green" ]; then
# if the active is 'green', deploy 'blue'
PREVIOUS_COLOR="green"
deploy "green" "blue"
elif [ "$ACTIVE_COLOR" == "blue" ]; then
# if the active is 'blue', deploy 'green'
PREVIOUS_COLOR="blue"
deploy "blue" "green"
else
# if the active is neither 'green' nor 'blue', reset to 'blue'
echo "🚧 Unexpected CONTAINER_COLOR value. Resetting to 'blue'..."
PREVIOUS_COLOR=""
docker compose --profile blue up -d
wait_for_health "blue"
install_dependencies "blue"
update_nginx_config "blue"
update_env_file "blue"
fi
echo "🎉 Deployment successful!"


@ -1,46 +0,0 @@
---
# Terraform Vultr provider doesn't expose the mount_tag.
# It does, however, expose the VFS id, which we save to Ansible host vars at `tofu apply` time.
# As a workaround, we use the Vultr API to fetch the mount_tag, then mount the VFS on the instance.
- name: Get the VFS data
ansible.builtin.uri:
url: https://api.vultr.com/v2/vfs
method: GET
status_code: 200
headers:
Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
register: vfs_list
- name: Get VFS variables
ansible.builtin.set_fact:
our_vfs_id: "{{ vfs_list.json.vfs | selectattr('tags', 'contains', 'our') | map(attribute='id') | first }}"
- name: Debug the our VFS id
ansible.builtin.debug:
msg: "The VFS ID for 'our' is {{ our_vfs_id }}"
- name: Attach VFS to Vultr instance
ansible.builtin.uri:
url: https://api.vultr.com/v2/vfs/{{ vultr_vfs_storage_id }}/attachments/{{ hostvars[inventory_hostname]['vultr_instance_id'] }}
method: PUT
status_code:
- 200
- 201
- 409
headers:
Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
register: vfs_attach
changed_when:
- vfs_attach.json is defined
- "'state' in vfs_attach.json"
- vfs_attach.json.state == "ATTACHED"
notify:
- Mount vfs
- name: Debug vfs_attach
ansible.builtin.debug:
var: vfs_attach
- name: Get the VFS mount_tag
ansible.builtin.set_fact:
our_vfs_mount_tag: "{{ vfs_attach.json.mount_tag | default('') }}"


@@ -31,6 +31,19 @@
   loop:
     - compose.production.yaml
+- name: Load environment variables
+  set_fact:
+    our_env_vars: "{{ lookup('community.general.read_dotenv', '../../../../.env.production') }}"
+- name: Create Docker secrets dynamically
+  when: inventory_hostname == (groups['swarm'] | first)
+  community.docker.docker_secret:
+    name: "{{ item.key }}"
+    data: "{{ item.value | b64encode }}"
+    data_is_b64: true
+    state: present
+  loop: "{{ our_env_vars | dict2items }}"
 - name: Deploy stack
   when: inventory_hostname == groups['swarm'] | first
   community.docker.docker_stack:
@@ -39,56 +52,55 @@
   compose:
     - /etc/futureporn/our/compose.production.yaml
     - services:
-        postgres:
-          environment:
-            POSTGRES_USER: "{{ lookup('dotenv', 'POSTGRES_USER', file='../../../../.env.production') }}"
-            POSTGRES_PASSWORD: "{{ lookup('dotenv', 'POSTGRES_PASSWORD', file='../../../../.env.production') }}"
-            POSTGRES_DB: "{{ lookup('dotenv', 'POSTGRES_DB', file='../../../../.env.production') }}"
-        worker:
-          environment:
-            DATABASE_URL: "{{ lookup('dotenv', 'DATABASE_URL', file='../../../../.env.production') }}"
-            NODE_ENV: "{{ lookup('dotenv', 'NODE_ENV', file='../../../../.env.production') }}"
-            ORIGIN: "{{ lookup('dotenv', 'ORIGIN', file='../../../../.env.production') }}"
-            PATREON_API_ORIGIN: "{{ lookup('dotenv', 'PATREON_API_ORIGIN', file='../../../../.env.production') }}"
-            PATREON_AUTHORIZE_PATH: "{{ lookup('dotenv', 'PATREON_AUTHORIZE_PATH', file='../../../../.env.production') }}"
-            PATREON_TOKEN_PATH: "{{ lookup('dotenv', 'PATREON_TOKEN_PATH', file='../../../../.env.production') }}"
-            PATREON_CLIENT_ID: "{{ lookup('dotenv', 'PATREON_CLIENT_ID', file='../../../../.env.production') }}"
-            PATREON_CLIENT_SECRET: "{{ lookup('dotenv', 'PATREON_CLIENT_SECRET', file='../../../../.env.production') }}"
-            COOKIE_SECRET: "{{ lookup('dotenv', 'COOKIE_SECRET', file='../../../../.env.production') }}"
-            S3_REGION: "{{ lookup('dotenv', 'S3_REGION', file='../../../../.env.production') }}"
-            S3_BUCKET: "{{ lookup('dotenv', 'S3_BUCKET', file='../../../../.env.production') }}"
-            S3_APPLICATION_KEY: "{{ lookup('dotenv', 'S3_APPLICATION_KEY', file='../../../../.env.production') }}"
-            S3_KEY_ID: "{{ lookup('dotenv', 'S3_KEY_ID', file='../../../../.env.production') }}"
-            S3_ENDPOINT: "{{ lookup('dotenv', 'S3_ENDPOINT', file='../../../../.env.production') }}"
-            CDN_ORIGIN: "{{ lookup('dotenv', 'CDN_ORIGIN', file='../../../../.env.production') }}"
-            CDN_TOKEN_SECRET: "{{ lookup('dotenv', 'CDN_TOKEN_SECRET', file='../../../../.env.production') }}"
-            WHISPER_DIR: "{{ lookup('dotenv', 'WHISPER_DIR', file='../../../../.env.production') }}"
-            B2_APPLICATION_KEY_ID: "{{ lookup('dotenv', 'B2_APPLICATION_KEY_ID', file='../../../../.env.production') }}"
-            B2_APPLICATION_KEY: "{{ lookup('dotenv', 'B2_APPLICATION_KEY', file='../../../../.env.production') }}"
-            SEEDBOX_SFTP_URL: "{{ lookup('dotenv', 'SEEDBOX_SFTP_URL', file='../../../../.env.production') }}"
-            SEEDBOX_SFTP_USERNAME: "{{ lookup('dotenv', 'SEEDBOX_SFTP_USERNAME', file='../../../../.env.production') }}"
-            SEEDBOX_SFTP_PASSWORD: "{{ lookup('dotenv', 'SEEDBOX_SFTP_PASSWORD', file='../../../../.env.production') }}"
-        server:
-          environment:
-            DATABASE_URL: "{{ lookup('dotenv', 'DATABASE_URL', file='../../../../.env.production') }}"
-            NODE_ENV: "{{ lookup('dotenv', 'NODE_ENV', file='../../../../.env.production') }}"
-            ORIGIN: "{{ lookup('dotenv', 'ORIGIN', file='../../../../.env.production') }}"
-            PATREON_API_ORIGIN: "{{ lookup('dotenv', 'PATREON_API_ORIGIN', file='../../../../.env.production') }}"
-            PATREON_AUTHORIZE_PATH: "{{ lookup('dotenv', 'PATREON_AUTHORIZE_PATH', file='../../../../.env.production') }}"
-            PATREON_TOKEN_PATH: "{{ lookup('dotenv', 'PATREON_TOKEN_PATH', file='../../../../.env.production') }}"
-            PATREON_CLIENT_ID: "{{ lookup('dotenv', 'PATREON_CLIENT_ID', file='../../../../.env.production') }}"
-            PATREON_CLIENT_SECRET: "{{ lookup('dotenv', 'PATREON_CLIENT_SECRET', file='../../../../.env.production') }}"
-            COOKIE_SECRET: "{{ lookup('dotenv', 'COOKIE_SECRET', file='../../../../.env.production') }}"
-            S3_REGION: "{{ lookup('dotenv', 'S3_REGION', file='../../../../.env.production') }}"
-            S3_BUCKET: "{{ lookup('dotenv', 'S3_BUCKET', file='../../../../.env.production') }}"
-            S3_APPLICATION_KEY: "{{ lookup('dotenv', 'S3_APPLICATION_KEY', file='../../../../.env.production') }}"
-            S3_KEY_ID: "{{ lookup('dotenv', 'S3_KEY_ID', file='../../../../.env.production') }}"
-            S3_ENDPOINT: "{{ lookup('dotenv', 'S3_ENDPOINT', file='../../../../.env.production') }}"
-            CDN_ORIGIN: "{{ lookup('dotenv', 'CDN_ORIGIN', file='../../../../.env.production') }}"
-            CDN_TOKEN_SECRET: "{{ lookup('dotenv', 'CDN_TOKEN_SECRET', file='../../../../.env.production') }}"
-            WHISPER_DIR: "{{ lookup('dotenv', 'WHISPER_DIR', file='../../../../.env.production') }}"
-            B2_APPLICATION_KEY_ID: "{{ lookup('dotenv', 'B2_APPLICATION_KEY_ID', file='../../../../.env.production') }}"
-            B2_APPLICATION_KEY: "{{ lookup('dotenv', 'B2_APPLICATION_KEY', file='../../../../.env.production') }}"
-            SEEDBOX_SFTP_URL: "{{ lookup('dotenv', 'SEEDBOX_SFTP_URL', file='../../../../.env.production') }}"
-            SEEDBOX_SFTP_USERNAME: "{{ lookup('dotenv', 'SEEDBOX_SFTP_USERNAME', file='../../../../.env.production') }}"
-            SEEDBOX_SFTP_PASSWORD: "{{ lookup('dotenv', 'SEEDBOX_SFTP_PASSWORD', file='../../../../.env.production') }}"
+        server:
+          environment:
+            DATABASE_URL: "{{ our_env_vars.DATABASE_URL }}"
+            NODE_ENV: "{{ our_env_vars.NODE_ENV }}"
+            ORIGIN: "{{ our_env_vars.ORIGIN }}"
+            PATREON_API_ORIGIN: "{{ our_env_vars.PATREON_API_ORIGIN }}"
+            PATREON_AUTHORIZE_PATH: "{{ our_env_vars.PATREON_AUTHORIZE_PATH }}"
+            PATREON_TOKEN_PATH: "{{ our_env_vars.PATREON_TOKEN_PATH }}"
+            PATREON_CLIENT_ID: "{{ our_env_vars.PATREON_CLIENT_ID }}"
+            PATREON_CLIENT_SECRET: "{{ our_env_vars.PATREON_CLIENT_SECRET }}"
+            COOKIE_SECRET: "{{ our_env_vars.COOKIE_SECRET }}"
+            S3_REGION: "{{ our_env_vars.S3_REGION }}"
+            S3_BUCKET: "{{ our_env_vars.S3_BUCKET }}"
+            S3_APPLICATION_KEY: "{{ our_env_vars.S3_APPLICATION_KEY }}"
+            S3_KEY_ID: "{{ our_env_vars.S3_KEY_ID }}"
+            S3_ENDPOINT: "{{ our_env_vars.S3_ENDPOINT }}"
+            CDN_ORIGIN: "{{ our_env_vars.CDN_ORIGIN }}"
+            CDN_TOKEN_SECRET: "{{ our_env_vars.CDN_TOKEN_SECRET }}"
+            WHISPER_DIR: "{{ our_env_vars.WHISPER_DIR }}"
+            B2_APPLICATION_KEY_ID: "{{ our_env_vars.B2_APPLICATION_KEY_ID }}"
+            B2_APPLICATION_KEY: "{{ our_env_vars.B2_APPLICATION_KEY }}"
+            SEEDBOX_SFTP_URL: "{{ our_env_vars.SEEDBOX_SFTP_URL }}"
+            SEEDBOX_SFTP_USERNAME: "{{ our_env_vars.SEEDBOX_SFTP_USERNAME }}"
+            SEEDBOX_SFTP_PASSWORD: "{{ our_env_vars.SEEDBOX_SFTP_PASSWORD }}"
+        worker:
+          environment:
+            DATABASE_URL: "{{ our_env_vars.DATABASE_URL }}"
+            NODE_ENV: "{{ our_env_vars.NODE_ENV }}"
+            ORIGIN: "{{ our_env_vars.ORIGIN }}"
+            PATREON_API_ORIGIN: "{{ our_env_vars.PATREON_API_ORIGIN }}"
+            PATREON_AUTHORIZE_PATH: "{{ our_env_vars.PATREON_AUTHORIZE_PATH }}"
+            PATREON_TOKEN_PATH: "{{ our_env_vars.PATREON_TOKEN_PATH }}"
+            PATREON_CLIENT_ID: "{{ our_env_vars.PATREON_CLIENT_ID }}"
+            PATREON_CLIENT_SECRET: "{{ our_env_vars.PATREON_CLIENT_SECRET }}"
+            COOKIE_SECRET: "{{ our_env_vars.COOKIE_SECRET }}"
+            S3_REGION: "{{ our_env_vars.S3_REGION }}"
+            S3_BUCKET: "{{ our_env_vars.S3_BUCKET }}"
+            S3_APPLICATION_KEY: "{{ our_env_vars.S3_APPLICATION_KEY }}"
+            S3_KEY_ID: "{{ our_env_vars.S3_KEY_ID }}"
+            S3_ENDPOINT: "{{ our_env_vars.S3_ENDPOINT }}"
+            CDN_ORIGIN: "{{ our_env_vars.CDN_ORIGIN }}"
+            CDN_TOKEN_SECRET: "{{ our_env_vars.CDN_TOKEN_SECRET }}"
+            WHISPER_DIR: "{{ our_env_vars.WHISPER_DIR }}"
+            B2_APPLICATION_KEY_ID: "{{ our_env_vars.B2_APPLICATION_KEY_ID }}"
+            B2_APPLICATION_KEY: "{{ our_env_vars.B2_APPLICATION_KEY }}"
+            SEEDBOX_SFTP_URL: "{{ our_env_vars.SEEDBOX_SFTP_URL }}"
+            SEEDBOX_SFTP_USERNAME: "{{ our_env_vars.SEEDBOX_SFTP_USERNAME }}"
+            SEEDBOX_SFTP_PASSWORD: "{{ our_env_vars.SEEDBOX_SFTP_PASSWORD }}"
+        pgadmin:
+          environment:
+            PGADMIN_DEFAULT_EMAIL: "{{ our_env_vars.PGADMIN_DEFAULT_EMAIL }}"
+            PGADMIN_DEFAULT_PASSWORD: "{{ our_env_vars.PGADMIN_DEFAULT_PASSWORD }}"
+        tailscale-pgadmin:
+          environment:
+            TS_AUTHKEY: "{{ our_env_vars.TS_AUTHKEY }}"
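
Note that the docker_secret task above only registers the secrets with the swarm; a service in the stack can read them only if compose.production.yaml also declares them. A minimal sketch of that declaration (the service and secret names are illustrative, not taken from this commit):

services:
  server:
    secrets:
      - DATABASE_URL
secrets:
  DATABASE_URL:
    external: true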


@ -1,49 +0,0 @@
---
- name: Configure firewall
community.general.ufw:
rule: allow
port: "{{ item }}"
proto: tcp
loop:
- 80
- 443
- 9000
- name: Allow UDP port 6969
community.general.ufw:
rule: allow
port: "6969"
proto: udp
- name: Install Caddy
ansible.builtin.import_role:
name: nvjacobo.caddy
- name: Configure Caddyfile
ansible.builtin.template:
src: 'templates/Caddyfile.j2'
dest: /etc/caddy/Caddyfile
mode: "0644"
notify: restart caddy
# @todo A performance enhancement would be to run aquatic outside of Docker.
# @see https://github.com/greatest-ape/aquatic/blob/34b45e923f84421181fc43cf5e20709e69ce0dfd/docker/aquatic_udp.Dockerfile#L5
- name: Setup docker container
community.docker.docker_container:
name: tracker
image: gitea.futureporn.net/futureporn/tracker:latest
pull: always
state: started
ports:
- "6969:6969/udp" # aquatic_udp
- "5063:5063" # tracker-helper
- "9000:9000" # aquatic metrics
env:
TRACKER_HELPER_ACCESSLIST_URL: https://tracker.futureporn.net/accesslist
TRACKER_HELPER_USERNAME: "{{ lookup('dotenv', 'TRACKER_HELPER_USERNAME', file='../.env') }}"
TRACKER_HELPER_PASSWORD: "{{ lookup('dotenv', 'TRACKER_HELPER_PASSWORD', file='../.env') }}"
TRACKER_URL: https://tracker.futureporn.net:6969
TRACKER_HELPER_ACCESSLIST_PATH: /var/lib/aquatic/accesslist


@ -1,13 +0,0 @@
tracker.futureporn.net {
reverse_proxy :5063 {
health_uri /health
health_interval 10s
health_timeout 5s
}
handle_errors {
respond "💥 Error -- {err.status_code} {err.status_text}"
}
}


@ -1,20 +0,0 @@
---
- name: Configure firewall
community.general.ufw:
rule: allow
port: "{{ item }}"
proto: tcp
loop:
- 80
- 443
- name: Install Caddy
ansible.builtin.import_role:
name: nvjacobo.caddy
- name: Configure Caddyfile
ansible.builtin.template:
src: "templates/Caddyfile.j2"
dest: /etc/caddy/Caddyfile
mode: "0644"
notify: restart caddy # nvjacobo.caddy handles this


@ -1,44 +0,0 @@
---
- name: Setup volume
community.docker.docker_volume:
name: pg_data
- name: Setup docker container
community.docker.docker_container:
name: uppy-companion
image: transloadit/companion
pull: missing
state: started
ports:
- "3020:3020"
env:
NODE_ENV: prod
COMPANION_PORT: "{{ lookup('dotenv', 'COMPANION_PORT', file='../.env') }}"
COMPANION_DOMAIN: "{{ lookup('dotenv', 'COMPANION_DOMAIN', file='../.env') }}"
COMPANION_SELF_ENDPOINT: "{{ lookup('dotenv', 'COMPANION_SELF_ENDPOINT', file='../.env') }}"
COMPANION_HIDE_METRICS: "{{ lookup('dotenv', 'COMPANION_HIDE_METRICS', file='../.env') }}"
COMPANION_HIDE_WELCOME: "{{ lookup('dotenv', 'COMPANION_HIDE_WELCOME', file='../.env') }}"
COMPANION_STREAMING_UPLOAD: "{{ lookup('dotenv', 'COMPANION_STREAMING_UPLOAD', file='../.env') }}"
COMPANION_TUS_DEFERRED_UPLOAD_LENGTH: "{{ lookup('dotenv', 'COMPANION_TUS_DEFERRED_UPLOAD_LENGTH', file='../.env') }}"
COMPANION_CLIENT_ORIGINS: "{{ lookup('dotenv', 'COMPANION_CLIENT_ORIGINS', file='../.env') }}"
COMPANION_PROTOCOL: "{{ lookup('dotenv', 'COMPANION_PROTOCOL', file='../.env') }}"
COMPANION_DATADIR: /mnt/uppy-server-data
COMPANION_SECRET: "{{ lookup('dotenv', 'COMPANION_SECRET', file='../.env') }}"
COMPANION_PREAUTH_SECRET: "{{ lookup('dotenv', 'COMPANION_PREAUTH_SECRET', file='../.env') }}"
COMPANION_AWS_KEY: "{{ lookup('dotenv', 'COMPANION_AWS_KEY', file='../.env') }}"
COMPANION_AWS_SECRET: "{{ lookup('dotenv', 'COMPANION_AWS_SECRET', file='../.env') }}"
COMPANION_AWS_BUCKET: "{{ lookup('dotenv', 'COMPANION_AWS_BUCKET', file='../.env') }}"
COMPANION_AWS_ENDPOINT: "{{ lookup('dotenv', 'COMPANION_AWS_ENDPOINT', file='../.env') }}"
COMPANION_AWS_REGION: "{{ lookup('dotenv', 'COMPANION_AWS_REGION', file='../.env') }}"
COMPANION_AWS_FORCE_PATH_STYLE: "false"
COMPANION_AWS_PREFIX: usc/
mounts:
- type: volume
target: "/mnt/uppy-server-data"
source: "uppy_data"
# - name: Allow VPC2.0 network access
# community.general.ufw:
# rule: allow
# port: '5432'
# proto: tcp
# from: 10.2.128.0/20
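
On the client side, an Uppy instance reaches this Companion through its public origin; a minimal sketch in TypeScript (the AwsS3 plugin choice is an assumption, only the companionUrl value comes from this setup):

import Uppy from '@uppy/core'
import AwsS3 from '@uppy/aws-s3'

const uppy = new Uppy()
// Delegate S3 signing/uploads to the Companion instance configured above.
uppy.use(AwsS3, { companionUrl: 'https://uppy.futureporn.net' })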


@ -1,15 +0,0 @@
uppy.futureporn.net {
# Define the upstream servers for load balancing
reverse_proxy :3020 {
# Health checks (optional)
health_uri /metrics
health_interval 10s
health_timeout 5s
}
handle_errors {
respond "💥 Error ~ {err.status_code} {err.status_text}"
}
}


@@ -64,13 +64,27 @@ services:
         max_attempts: 5
         window: 60s
-  # pgadmin:
-  #   image: dpage/pgadmin4:latest
-  #   ports:
-  #     - target: 5050
-  #       published: 8095
-  #       protocol: tcp
-  #       mode: ingress
+  pgadmin:
+    image: dpage/pgadmin4:latest
+    network_mode: service:tailscale-pgadmin
+    environment:
+      PGADMIN_DISABLE_POSTFIX: 1
+
+  tailscale-pgadmin:
+    image: tailscale/tailscale:latest
+    volumes:
+      - /mnt/vfs/futureporn/tailscale/state:/var/lib/tailscale
+      - /mnt/vfs/futureporn/tailscale/config:/config
+    devices:
+      - /dev/net/tun:/dev/net/tun
+    cap_add:
+      - net_admin
+    restart: unless-stopped
+    environment:
+      TS_EXTRA_ARGS: "--advertise-tags=tag:container --reset"
+      TS_SERVE_CONFIG: /config/pgadmin.json
+      TS_STATE_DIR: /var/lib/tailscale
+      TS_USERSPACE: false
 
 volumes:
   pgdata:
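
The tailscale-pgadmin sidecar expects a Tailscale serve config at the path given by TS_SERVE_CONFIG (/config/pgadmin.json). A minimal sketch of that file, assuming pgadmin listens on port 80 inside the shared network namespace (the proxied port is an assumption, not part of this commit):

{
  "TCP": { "443": { "HTTPS": true } },
  "Web": {
    "${TS_CERT_DOMAIN}:443": {
      "Handlers": { "/": { "Proxy": "http://127.0.0.1:80" } }
    }
  }
}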