use container port 3000

CJ_Clippy 2025-10-07 04:12:07 -08:00
parent 6bc5f182f3
commit 8ef71691e0
22 changed files with 257 additions and 208 deletions

View File

@@ -5,34 +5,10 @@ set -euo pipefail
 # Script: pinall.sh
 #
 # Description:
-#   This script reads a list of S3 object keys (filenames)
-#   from a file, downloads each file from Backblaze B2,
-#   adds it to a local IPFS node, and optionally cleans up
-#   the temporary downloaded file to save disk space.
-#
-# Usage:
-#   ./pinall.sh <file-with-s3-keys>
-# Example:
-#   # sudo -u ipfs env IPFS_PATH=/mnt/blockstorage/ipfs pinall.sh /home/ipfs/filenames.txt
-#
-#   - files.txt should contain one S3 key per line.
-#   - Lines starting with '#' or empty lines are ignored.
-#
-# Environment:
-#   - Requires `b2` CLI configured with B2 credentials.
-#   - Requires an IPFS node installed and accessible at
-#     $IPFS_PATH (set in script).
-#
-# Behavior:
-#   1. Reads each key from the input file.
-#   2. Downloads the file from B2 to /tmp/<key>.
-#   3. Adds the downloaded file to IPFS (CID version 1).
-#   4. Deletes the temporary file after adding to IPFS.
-#   5. Logs progress with timestamps to stdout.
-#
-# Exit Codes:
-#   - 0: All files processed successfully.
-#   - 1: Incorrect usage or missing input file.
+#   Downloads all files listed in an input file to a temporary
+#   directory (only if not already present), then pins them
+#   all to a local IPFS node. Cleans up temp files only if
+#   all downloads and pins succeeded.
 #
 ############################################################
@@ -42,31 +18,56 @@ if [ $# -ne 1 ]; then
   exit 1
 fi
 
+FILELIST=$1
+TMPDIR="/mnt/blockstorage/pinalltmp"
+
+# Ensure tmp directory exists
+mkdir -p "$TMPDIR"
+
 ipfs id
 echo "Using IPFS_PATH=$IPFS_PATH"
 
-FILELIST=$1
+# Track overall success
+ALL_OK=true
+
+# First pass: download files if not already present
+echo "[$(date +"%Y-%m-%d %H:%M:%S")] Starting downloads..."
 while IFS= read -r KEY; do
   [[ -z "$KEY" || "$KEY" =~ ^# ]] && continue
-  echo "[$(date +"%Y-%m-%d %H:%M:%S")] Downloading $KEY from B2..."
-  TMPFILE="/tmp/$KEY"
+  TMPFILE="$TMPDIR/$KEY"
+
+  if [ -f "$TMPFILE" ]; then
+    echo "[$(date +"%Y-%m-%d %H:%M:%S")] File already exists, skipping: $KEY"
+    continue
+  fi
+
+  echo "[$(date +"%Y-%m-%d %H:%M:%S")] Downloading $KEY to $TMPFILE..."
   if b2 file download "b2://futureporn/$KEY" "$TMPFILE"; then
     echo "[$(date +"%Y-%m-%d %H:%M:%S")] Download complete: $KEY"
   else
     echo "[$(date +"%Y-%m-%d %H:%M:%S")] Download failed: $KEY"
-    rm -f "$TMPFILE"
-    continue
+    ALL_OK=false
   fi
-
-  echo "[$(date +"%Y-%m-%d %H:%M:%S")] Adding $KEY to IPFS..."
-  ipfs add --cid-version=1 "$TMPFILE"
-
-  # optional cleanup to save space
-  rm -f "$TMPFILE"
 done < "$FILELIST"
 
-echo "[$(date +"%Y-%m-%d %H:%M:%S")] All tasks complete."
+# Second pass: pin all files
+echo "[$(date +"%Y-%m-%d %H:%M:%S")] Starting IPFS pinning..."
+for FILE in "$TMPDIR"/*; do
+  [[ ! -f "$FILE" ]] && continue
+  echo "[$(date +"%Y-%m-%d %H:%M:%S")] Adding $(basename "$FILE") to IPFS..."
+  if ! ipfs add --cid-version=1 "$FILE"; then
+    echo "[$(date +"%Y-%m-%d %H:%M:%S")] IPFS add failed for $(basename "$FILE")"
+    ALL_OK=false
+  fi
+done
+
+# Cleanup only if all succeeded
+if [ "$ALL_OK" = true ]; then
+  echo "[$(date +"%Y-%m-%d %H:%M:%S")] All downloads and pins succeeded. Cleaning up temporary files..."
+  rm -rf "$TMPDIR"/*
+else
+  echo "[$(date +"%Y-%m-%d %H:%M:%S")] Some operations failed. Leaving temporary files for inspection."
+fi
+
+echo "[$(date +"%Y-%m-%d %H:%M:%S")] Script finished."

View File

@@ -0,0 +1,13 @@
+---
+- name: Reload caddy
+  ansible.builtin.systemd_service:
+    name: caddy
+    state: reloaded
+
+- name: Restart caddy
+  ansible.builtin.systemd_service:
+    name: caddy
+    state: restarted
+    enabled: true
+    daemon_reload: true

View File

@@ -1,4 +1,9 @@
 ---
+- name: Debuggy
+  debug:
+    msg: "our_published_port={{ our_published_port }}"
+
 - name: Configure firewall
   community.general.ufw:
     rule: allow
@@ -11,6 +16,7 @@
 - name: Install Caddy
   ansible.builtin.import_role:
     name: nvjacobo.caddy
+  notify: Restart caddy
 
 - name: Create html dir
   ansible.builtin.file:
@@ -23,4 +29,4 @@
     src: 'templates/Caddyfile.j2'
     dest: /etc/caddy/Caddyfile
     mode: "0644"
-  notify: reload caddy
+  notify: Reload caddy

View File

@@ -1,21 +1,21 @@
-{% set sites = ['future.porn', 'pgadmin.sbtp.xyz', 'rssapp.sbtp.xyz'] %}
+{% set sites = {
+  'future.porn': our_published_port,
+  'pgadmin.sbtp.xyz': 9095,
+  'rssapp.sbtp.xyz': 9096
+} %}
 
-{% for site in sites %}
+{% for site, port in sites.items() %}
 {{ site }} {
 
     # Define the upstream servers (docker swarm nodes) for load balancing
-    reverse_proxy {% for host in groups['our'] %}{{ hostvars[host]['internal_ip'] }}:{{ our_server_port }} {% endfor %} {
-        # Load balancing policy (optional, defaults to "random")
-        lb_policy least_connections
-
-        # Health checks
-        health_uri /health
-        health_interval 10s
-        health_timeout 5s
-    }
-
-    handle_errors {
-        respond "💥 Error; Please try again later. Code {err.status_code} | {err.status_text}."
-    }
+    reverse_proxy {% for host in groups['swarm'] %}{{ hostvars[host]['internal_ip'] }}:{{ port }} {% endfor %} {
+        health_uri /health
+        health_interval 10s
+        health_timeout 5s
+    }
+
+    handle_errors {
+        respond "💥 Error; Please try again later. Code {err.status_code} | {err.status_text}. Our code monkeys have been deployed to fix the issue~"
+    }
 }
 {% endfor %}
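A quick way to sanity-check the rendered template on the load balancer before the Reload caddy handler fires (standard Caddy CLI; the file path matches the dest in the task above):

    caddy validate --config /etc/caddy/Caddyfile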

View File

@@ -0,0 +1,10 @@
+---
+- name: Allow Docker Swarm ports on enp8s0
+  community.general.ufw:
+    rule: allow
+    port: "{{ item.port }}"
+    proto: "{{ item.proto }}"
+    direction: in
+    interface: enp8s0
+  loop:
+    - { port: "{{ our_published_port }}", proto: tcp } # our server

View File

@@ -1,13 +1,13 @@
 ---
+- name: Configure firewall
+  ansible.builtin.include_tasks:
+    file: firewall.yml
+
 - name: Configure filesystem
   ansible.builtin.include_tasks:
     file: filesystem.yml
 
-- name: Configure docker stack
-  ansible.builtin.include_tasks:
-    file: stack.yml
-
-- name: Deploy our via docker stack
+- name: Configure docker stack app
   ansible.builtin.include_tasks:
     file: stack.yml

View File

@@ -11,6 +11,7 @@
     scope: swarm
     state: present
     driver: overlay
+  when: inventory_hostname == groups['swarm'] | first
 
 - name: Clone the latest code
   ansible.builtin.git:
@@ -31,6 +32,7 @@
     - compose.production.yaml
 
 - name: Deploy stack
+  when: inventory_hostname == groups['swarm'] | first
   community.docker.docker_stack:
     state: present
     name: our
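For reference, the deploy task is roughly equivalent to running the following on the first swarm node (stack name from the task; only the compose.production.yaml entry is visible here, so add one -c flag per file in the task's compose list):

    docker stack deploy -c compose.production.yaml our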

View File

@@ -0,0 +1,10 @@
+---
+- name: Allow Docker Swarm ports on enp8s0
+  community.general.ufw:
+    rule: allow
+    port: "{{ item.port }}"
+    proto: "{{ item.proto }}"
+    direction: in
+    interface: enp8s0
+  loop:
+    - { port: 9096, proto: tcp } # rssapp

View File

@@ -0,0 +1,11 @@
+---
+- name: Configure rssapp docker service
+  ansible.builtin.include_tasks:
+    file: service.yml
+  when: inventory_hostname == groups['swarm'] | first
+
+- name: Configure rssapp firewall
+  ansible.builtin.include_tasks:
+    file: firewall.yml

View File

@@ -0,0 +1,32 @@
+---
+- name: Deploy rssapp service to Docker Swarm
+  community.docker.docker_swarm_service:
+    name: rssapp
+    image: gitea.futureporn.net/futureporn/rssapp:latest
+    labels:
+      net.futureporn.service: "rssapp"
+    healthcheck:
+      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/health"]
+      interval: 1m
+      timeout: 10s
+      retries: 3
+      start_period: 15s
+    restart_config:
+      condition: on-failure
+      delay: 5s
+      max_attempts: 3
+      window: 60s
+    publish:
+      - published_port: 9096
+        target_port: 3000
+        protocol: tcp
+        mode: ingress
+    env:
+      APIFY_TOKEN: "{{ lookup('dotenv', 'RSSAPP_APIFY_TOKEN', file='../../../../.env.production') }}"
+      ORIGIN: "{{ lookup('dotenv', 'RSSAPP_ORIGIN', file='../../../../.env.production') }}"
+      WHITELIST: "{{ lookup('dotenv', 'RSSAPP_WHITELIST', file='../../../../.env.production') }}"
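Once the service converges, the ingress publish can be spot-checked from any swarm node (port and /health path as defined above; wget mirrors the healthcheck command):

    docker service ps rssapp
    wget -qO- http://127.0.0.1:9096/health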

View File

@@ -1,3 +1,2 @@
 ---
 swarm_enable_manager: false
-swarm_enable_worker: false

View File

@@ -0,0 +1,13 @@
+---
+- name: Allow Docker Swarm ports on enp8s0
+  community.general.ufw:
+    rule: allow
+    port: "{{ item.port }}"
+    proto: "{{ item.proto }}"
+    direction: in
+    interface: enp8s0
+  loop:
+    - { port: 2377, proto: tcp } # Swarm control plane
+    - { port: 7946, proto: tcp } # Gossip TCP
+    - { port: 7946, proto: udp } # Gossip UDP
+    - { port: 4789, proto: udp } # Overlay network VXLAN
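The loop above covers the standard swarm port set; the equivalent one-off ufw commands would be roughly:

    ufw allow in on enp8s0 to any port 2377 proto tcp
    ufw allow in on enp8s0 to any port 7946 proto tcp
    ufw allow in on enp8s0 to any port 7946 proto udp
    ufw allow in on enp8s0 to any port 4789 proto udp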

View File

@@ -1,5 +1,9 @@
 ---
+- name: Configure firewall
+  ansible.builtin.include_tasks:
+    file: firewall.yml
+
 - name: Configure swarm manager
   ansible.builtin.include_tasks:
     file: manager.yml
@@ -8,4 +12,4 @@
 - name: Configure swarm worker
   ansible.builtin.include_tasks:
     file: worker.yml
-  when: swarm_enable_worker
+  when: not swarm_enable_manager

View File

@@ -7,7 +7,7 @@
   register: swarm_create
 
 - name: Set join tokens as host facts (manager only)
-  set_fact:
+  ansible.builtin.set_fact:
     swarm_worker_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Worker }}"
     swarm_manager_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Manager }}"
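If the registered facts ever need cross-checking, the same tokens are available on the manager through the standard Docker CLI:

    docker swarm join-token worker
    docker swarm join-token manager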

View File

@@ -1,19 +1,12 @@
 ---
-- debug:
-    var: groups['swarm-node']
-
-- name: Get all swarm nodes except the first one
-  set_fact:
-    swarm_worker_ips: "{{ groups['swarm-node'][1:] }}"
+- name: Debug
+  ansible.builtin.debug:
+    var: groups['swarm']
 
 - name: Join worker nodes
   community.docker.docker_swarm:
     state: join
-    advertise_addr: "{{ internal_ip }}:4567"
-    join_token: "{{ hostvars[groups['swarm-node'] | first]['swarm_worker_join_token'] }}"
-    remote_addrs: swarm_worker_ips
-
-# - name: Join swarm as worker
-#   community.docker.docker_swarm:
-#     state: joined
-#     join_token: "{{ hostvars[groups['swarm-node'] | first].swarm_worker_join_token }}"
-#     remote_addrs: ["{{ hostvars[groups['swarm-node'] | first].internal_ip }}:2377"]
+    advertise_addr: "{{ internal_ip }}"
+    join_token: "{{ hostvars[groups['swarm'] | first]['swarm_worker_join_token'] }}"
+    remote_addrs:
+      - "{{ hostvars[groups['swarm'] | first]['internal_ip'] }}:2377"
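The join task above is roughly what this manual command does on each worker, using the token set in manager.yml and the manager's internal IP (placeholders, not literal values):

    docker swarm join --token <swarm_worker_join_token> <manager_internal_ip>:2377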

View File

@@ -9,7 +9,7 @@
     - bootstrap
 
 - name: Assert common dependencies
-  hosts: swarm-node
+  hosts: all
   gather_facts: true
  check_mode: false
   become: true
@@ -17,21 +17,24 @@
     - common
     - docker
 
+# the decision of worker vs manager is set in ansible inventory by opentofu
 - name: Set up docker swarm
-  hosts: swarm-node
+  hosts: swarm
   gather_facts: true
   roles:
     - swarm
+  vars:
+    swarm_enable_manager: "{{ inventory_hostname == groups['swarm'][0] }}"
 
-- name: Assert our dependencies
-  hosts: swarm-node
+- name: Setup swarm apps
+  hosts: swarm
   gather_facts: true
   check_mode: false
   become: true
-  vars_files:
-    - vars/main.yml
   roles:
     - our
+    - rssapp
 
 - name: Configure load balancer
   hosts: loadbalancer
@@ -68,13 +71,6 @@
     - ipfs
 
-# - name: Set up our app
-#   hosts: swarm-node
-#   gather_facts: true
-#   roles:
-#     - our
-#   tags:
-#     - our
 
 # - name: Install Capture instance
 #   hosts: capture

View File

@@ -5,7 +5,7 @@
 # infisical_url: "{{ lookup('dotenv', 'INFISICAL_URL', file='../.env' )}}"
 # infisical_env_slug: prod
 # infisical_secrets: >-
-#   {{ 
+#   {{
 #     lookup(
 #       'infisical.vault.read_secrets',
 #       universal_auth_client_id=infisical_client_id,
@@ -23,6 +23,5 @@ s3_region: us-west-000
 s3_endpoint: https://s3.us-west-000.backblazeb2.com
 kubo_version: v0.34.1
 our_server_port: 3000
-our_published_port: 8086

View File

@@ -1,7 +0,0 @@
-services:
-  server:
-    ports:
-      - target: 5000
-        published: 8087
-        protocol: tcp
-        mode: ingress

View File

@@ -1,13 +0,0 @@
-services:
-  # pgadmin:
-  #   image: dpage/pgadmin4:latest
-  #   ports:
-  #     - target: 5050
-  #       published: 8095
-  #       protocol: tcp
-  #       mode: ingress
-
-networks:
-  default:
-    external: true
-    name: spooky

View File

@@ -1,7 +0,0 @@
-services:
-  server:
-    ports:
-      - target: 5000 # container port
-        published: 8086 # Swarm ingress port
-        protocol: tcp
-        mode: ingress

View File

@@ -7,10 +7,16 @@ services:
     volumes:
       - /mnt/vfs/futureporn:/mnt/vfs/futureporn
     ports:
-      - target: 5000 # container port
-        published: 8086 # Swarm ingress port
+      - target: 3000
+        published: 8086
         protocol: tcp
         mode: ingress
+    healthcheck:
+      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/health"]
+      interval: 10s
+      retries: 5
+      start_period: 10s
+      timeout: 10s
 
   worker:
     image: gitea.futureporn.net/futureporn/our:latest
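With the target now matching the port the app actually listens on, the published port should answer on any swarm node once the stack redeploys (wget mirrors the healthcheck command):

    wget -qO- http://127.0.0.1:8086/health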

View File

@ -101,17 +101,17 @@ resource "vultr_vpc" "futureporn_vpc" {
region = "ord" region = "ord"
} }
# resource "bunnynet_dns_record" "future_porn_a" { resource "bunnynet_dns_record" "future_porn_a" {
# for_each = zipmap( for_each = zipmap(
# range(length(vultr_instance.our_vps)), range(length(vultr_instance.loadbalancer)),
# vultr_instance.our_vps vultr_instance.loadbalancer
# ) )
# zone = bunnynet_dns_zone.future_porn.id zone = bunnynet_dns_zone.future_porn.id
# name = "*" name = "*"
# type = "A" type = "A"
# value = each.value.main_ip value = each.value.main_ip
# } }
@ -121,52 +121,34 @@ resource "bunnynet_dns_zone" "future_porn" {
# load balancing instance # load balancing instance
# resource "vultr_instance" "loadbalancer" { resource "vultr_instance" "loadbalancer" {
# count = 1 count = 1
# hostname = "fp-lb-${count.index}" hostname = "fp-lb-${count.index}"
# plan = "vc2-1c-2gb" plan = "vc2-1c-2gb"
# region = "ord" region = "ord"
# backups = "disabled" backups = "disabled"
# ddos_protection = "false" ddos_protection = "false"
# os_id = 1743 os_id = 1743
# enable_ipv6 = true enable_ipv6 = true
# label = "fp lb ${count.index}" label = "fp lb ${count.index}"
# tags = ["futureporn", "loadbalancer", "our", "tofu"] tags = ["futureporn", "loadbalancer", "our", "tofu"]
# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID] ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
# user_data = base64encode(var.vps_user_data) user_data = base64encode(var.vps_user_data)
# vpc_ids = [ vpc_ids = [
# vultr_vpc.futureporn_vpc.id vultr_vpc.futureporn_vpc.id
# ] ]
# reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id
# } }
# our0
# resource "vultr_instance" "our_vps" {
# count = 1
# hostname = "fp-our-${count.index}"
# plan = "vc2-4c-8gb"
# region = "ord"
# backups = "disabled"
# ddos_protection = "false"
# os_id = 1743
# enable_ipv6 = true
# label = "fp our ${count.index}"
# tags = ["futureporn", "our", "tofu"]
# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
# user_data = base64encode(var.vps_user_data)
# vpc_ids = [
# vultr_vpc.futureporn_vpc.id
# ]
# }
# resource "bunnynet_dns_record" "future_porn_apex" {
# zone = bunnynet_dns_zone.future_porn.id resource "bunnynet_dns_record" "future_porn_apex" {
# name = "" zone = bunnynet_dns_zone.future_porn.id
# type = "A" name = ""
# value = vultr_instance.loadbalancer[0].main_ip type = "A"
# ttl = 3600 value = vultr_instance.loadbalancer[0].main_ip
# } ttl = 3600
}
resource "bunnynet_dns_record" "www_future_porn" { resource "bunnynet_dns_record" "www_future_porn" {
@ -192,7 +174,7 @@ resource "vultr_instance" "swarm_node" {
os_id = 1743 os_id = 1743
enable_ipv6 = true enable_ipv6 = true
label = "swarm node ${count.index}" label = "swarm node ${count.index}"
tags = ["fp", "our", "server", "tofu"] tags = ["fp", "swarm", "server", "tofu"]
ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID] ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
vpc_ids = [ vpc_ids = [
vultr_vpc.futureporn_vpc.id vultr_vpc.futureporn_vpc.id
@ -296,23 +278,23 @@ resource "vultr_block_storage" "ipfs_blockstorage" {
# } # }
# } # }
# resource "ansible_host" "loadbalancer" { resource "ansible_host" "loadbalancer" {
# count = length(vultr_instance.loadbalancer) count = length(vultr_instance.loadbalancer)
# name = vultr_instance.loadbalancer[count.index].hostname name = vultr_instance.loadbalancer[count.index].hostname
# groups = ["loadbalancer"] groups = ["loadbalancer"]
# variables = { variables = {
# ansible_host = vultr_instance.loadbalancer[count.index].main_ip ansible_host = vultr_instance.loadbalancer[count.index].main_ip
# internal_ip = vultr_instance.loadbalancer[count.index].internal_ip internal_ip = vultr_instance.loadbalancer[count.index].internal_ip
# } }
# } }
resource "ansible_host" "swarm_node" { resource "ansible_host" "swarm_node" {
for_each = { for idx, host in vultr_instance.swarm_node : idx => host } for_each = { for idx, host in vultr_instance.swarm_node : idx => host }
name = each.value.hostname name = each.value.hostname
groups = ["swarm-node"] groups = ["swarm"]
variables = { variables = {
ansible_host = each.value.main_ip ansible_host = each.value.main_ip
internal_ip = each.value.internal_ip internal_ip = each.value.internal_ip
@ -337,18 +319,18 @@ resource "ansible_host" "swarm_node" {
# } # }
# } # }
# resource "ansible_host" "our" { resource "ansible_host" "swarm" {
# for_each = { for idx, host in vultr_instance.our_vps : idx => host } for_each = { for idx, host in vultr_instance.swarm_node : idx => host }
# name = each.value.hostname name = each.value.hostname
# groups = ["our"] groups = ["swarm"]
# variables = { variables = {
# ansible_host = each.value.main_ip ansible_host = each.value.main_ip
# internal_ip = each.value.internal_ip internal_ip = each.value.internal_ip
# vultr_instance_id = each.value.id vultr_instance_id = each.value.id
# vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id
# } }
# } }
resource "vultr_virtual_file_system_storage" "vfs" { resource "vultr_virtual_file_system_storage" "vfs" {
label = "fp-vfs-cache" label = "fp-vfs-cache"
@ -366,8 +348,8 @@ resource "vultr_virtual_file_system_storage" "vfs" {
# } # }
resource "ansible_group" "swarm-node" { resource "ansible_group" "swarm" {
name = "swarm-node" name = "swarm"
} }
@ -376,9 +358,9 @@ resource "ansible_group" "our" {
} }
# resource "ansible_group" "loadbalancer" { resource "ansible_group" "loadbalancer" {
# name = "loadbalancer" name = "loadbalancer"
# } }
resource "ansible_group" "ipfs" { resource "ansible_group" "ipfs" {
name = "ipfs" name = "ipfs"
@ -387,10 +369,9 @@ resource "ansible_group" "ipfs" {
resource "ansible_group" "futureporn" { resource "ansible_group" "futureporn" {
name = "futureporn" name = "futureporn"
children = [ children = [
# "loadbalancer", "loadbalancer",
"capture", "capture",
"swarm-node", "swarm",
"our",
"ipfs" "ipfs"
] ]
} }
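After un-commenting the loadbalancer instance, DNS records, and Ansible inventory resources, the usual OpenTofu cycle applies them and regenerates the inventory groups (run from the directory holding this configuration):

    tofu plan
    tofu apply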