use container port 3000

CJ_Clippy 2025-10-07 04:12:07 -08:00
parent 6bc5f182f3
commit 8ef71691e0
22 changed files with 257 additions and 208 deletions

View File

@@ -5,34 +5,10 @@ set -euo pipefail
# Script: pinall.sh
#
# Description:
# This script reads a list of S3 object keys (filenames)
# from a file, downloads each file from Backblaze B2,
# adds it to a local IPFS node, and optionally cleans up
# the temporary downloaded file to save disk space.
#
# Usage:
# ./pinall.sh <file-with-s3-keys>
# Example:
# # sudo -u ipfs env IPFS_PATH=/mnt/blockstorage/ipfs pinall.sh /home/ipfs/filenames.txt
#
# - files.txt should contain one S3 key per line.
# - Lines starting with '#' or empty lines are ignored.
#
# Environment:
# - Requires `b2` CLI configured with B2 credentials.
# - Requires an IPFS node installed and accessible at
# $IPFS_PATH (set in script).
#
# Behavior:
# 1. Reads each key from the input file.
# 2. Downloads the file from B2 to /tmp/<key>.
# 3. Adds the downloaded file to IPFS (CID version 1).
# 4. Deletes the temporary file after adding to IPFS.
# 5. Logs progress with timestamps to stdout.
#
# Exit Codes:
# - 0: All files processed successfully.
# - 1: Incorrect usage or missing input file.
# Downloads all files listed in an input file to a temporary
# directory (only if not already present), then pins them
# all to a local IPFS node. Cleans up temp files only if
# all downloads and pins succeeded.
#
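# Example key list (keys below are hypothetical; one S3 key per line,
# blank lines and lines starting with '#' are skipped):
#
#   vods/2025-01-01-example-stream.mp4
#   vods/2025-01-02-another-stream.mkv
#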
############################################################
@@ -42,31 +18,56 @@ if [ $# -ne 1 ]; then
exit 1
fi
FILELIST=$1
TMPDIR="/mnt/blockstorage/pinalltmp"
# Ensure tmp directory exists
mkdir -p "$TMPDIR"
ipfs id
echo "Using IPFS_PATH=$IPFS_PATH"
FILELIST=$1
# Track overall success
ALL_OK=true
# First pass: download files if not already present
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Starting downloads..."
while IFS= read -r KEY; do
[[ -z "$KEY" || "$KEY" =~ ^# ]] && continue
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Downloading $KEY from B2..."
TMPFILE="/tmp/$KEY"
TMPFILE="$TMPDIR/$KEY"
if [ -f "$TMPFILE" ]; then
echo "[$(date +"%Y-%m-%d %H:%M:%S")] File already exists, skipping: $KEY"
continue
fi
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Downloading $KEY to $TMPFILE..."
if b2 file download "b2://futureporn/$KEY" "$TMPFILE"; then
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Download complete: $KEY"
else
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Download failed: $KEY"
rm -f "$TMPFILE"
continue
ALL_OK=false
fi
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Adding $KEY to IPFS..."
ipfs add --cid-version=1 "$TMPFILE"
# optional cleanup to save space
rm -f "$TMPFILE"
done < "$FILELIST"
echo "[$(date +"%Y-%m-%d %H:%M:%S")] All tasks complete."
# Second pass: pin all files
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Starting IPFS pinning..."
for FILE in "$TMPDIR"/*; do
[[ ! -f "$FILE" ]] && continue
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Adding $(basename "$FILE") to IPFS..."
if ! ipfs add --cid-version=1 "$FILE"; then
echo "[$(date +"%Y-%m-%d %H:%M:%S")] IPFS add failed for $(basename "$FILE")"
ALL_OK=false
fi
done
# Cleanup only if all succeeded
if [ "$ALL_OK" = true ]; then
echo "[$(date +"%Y-%m-%d %H:%M:%S")] All downloads and pins succeeded. Cleaning up temporary files..."
rm -rf "$TMPDIR"/*
else
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Some operations failed. Leaving temporary files for inspection."
fi
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Script finished."

View File

@@ -0,0 +1,13 @@
---
- name: Reload caddy
ansible.builtin.systemd_service:
name: caddy
state: reloaded
- name: Restart caddy
ansible.builtin.systemd_service:
name: caddy
state: restarted
enabled: true
daemon_reload: true

View File

@@ -1,4 +1,9 @@
---
- name: Debug published port
ansible.builtin.debug:
msg: "our_published_port={{ our_published_port }}"
- name: Configure firewall
community.general.ufw:
rule: allow
@@ -11,6 +16,7 @@
- name: Install Caddy
ansible.builtin.import_role:
name: nvjacobo.caddy
notify: Restart caddy
- name: Create html dir
ansible.builtin.file:
@@ -23,4 +29,4 @@
src: 'templates/Caddyfile.j2'
dest: /etc/caddy/Caddyfile
mode: "0644"
notify: reload caddy
notify: Reload caddy

View File

@@ -1,21 +1,21 @@
{% set sites = ['future.porn', 'pgadmin.sbtp.xyz', 'rssapp.sbtp.xyz'] %}
{% set sites = {
'future.porn': our_published_port,
'pgadmin.sbtp.xyz': 9095,
'rssapp.sbtp.xyz': 9096
} %}
{% for site in sites %}
{% for site, port in sites.items() %}
{{ site }} {
# Define the upstream servers (docker swarm nodes) for load balancing
reverse_proxy {% for host in groups['our'] %}{{ hostvars[host]['internal_ip'] }}:{{ our_server_port }} {% endfor %} {
# Load balancing policy (optional, defaults to "random")
lb_policy least_connections
# Define the upstream servers (docker swarm nodes) for load balancing
reverse_proxy {% for host in groups['swarm'] %}{{ hostvars[host]['internal_ip'] }}:{{ port }} {% endfor %} {
health_uri /health
health_interval 10s
health_timeout 5s
}
# Health checks
health_uri /health
health_interval 10s
health_timeout 5s
}
handle_errors {
respond "💥 Error; Please try again later. Code {err.status_code} | {err.status_text}."
}
handle_errors {
respond "💥 Error; Please try again later. Code {err.status_code} | {err.status_text}. Our code monkeys have been deployed to fix the issue~"
}
}
{% endfor %}
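For one entry of the sites map, the rendered Caddyfile comes out roughly as the sketch below (assuming two swarm nodes with hypothetical internal IPs, and our_published_port set to 8086 as in vars/main.yml):

future.porn {
    reverse_proxy 10.1.96.3:8086 10.1.96.4:8086 {
        health_uri /health
        health_interval 10s
        health_timeout 5s
    }
    handle_errors {
        respond "💥 Error; Please try again later. Code {err.status_code} | {err.status_text}. Our code monkeys have been deployed to fix the issue~"
    }
}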

View File

@@ -0,0 +1,10 @@
---
- name: Allow Docker Swarm ports on enp8s0
community.general.ufw:
rule: allow
port: "{{ item.port }}"
proto: "{{ item.proto }}"
direction: in
interface: enp8s0
loop:
- { port: "{{ our_published_port }}", proto: tcp } # our server

View File

@@ -1,13 +1,13 @@
---
- name: Configure firewall
ansible.builtin.include_tasks:
file: firewall.yml
- name: Configure filesystem
ansible.builtin.include_tasks:
file: filesystem.yml
- name: Configure docker stack
ansible.builtin.include_tasks:
file: stack.yml
- name: Deploy our via docker stack
- name: Configure docker stack app
ansible.builtin.include_tasks:
file: stack.yml

View File

@@ -11,6 +11,7 @@
scope: swarm
state: present
driver: overlay
when: inventory_hostname == groups['swarm'] | first
- name: Clone the latest code
ansible.builtin.git:
@@ -31,6 +32,7 @@
- compose.production.yaml
- name: Deploy stack
when: inventory_hostname == groups['swarm'] | first
community.docker.docker_stack:
state: present
name: our

View File

@@ -0,0 +1,10 @@
---
- name: Allow Docker Swarm ports on enp8s0
community.general.ufw:
rule: allow
port: "{{ item.port }}"
proto: "{{ item.proto }}"
direction: in
interface: enp8s0
loop:
- { port: 9096, proto: tcp } # rssapp

View File

@@ -0,0 +1,11 @@
---
- name: Configure rssapp docker service
ansible.builtin.include_tasks:
file: service.yml
when: inventory_hostname == groups['swarm'] | first
- name: Configure rssapp firewall
ansible.builtin.include_tasks:
file: firewall.yml

View File

@@ -0,0 +1,32 @@
---
- name: Deploy rssapp service to Docker Swarm
community.docker.docker_swarm_service:
name: rssapp
image: gitea.futureporn.net/futureporn/rssapp:latest
labels:
net.futureporn.service: "rssapp"
healthcheck:
test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/health"]
interval: 1m
timeout: 10s
retries: 3
start_period: 15s
restart_config:
condition: on-failure
delay: 5s
max_attempts: 3
window: 60s
publish:
- published_port: 9096
target_port: 3000
protocol: tcp
mode: ingress
env:
APIFY_TOKEN: "{{ lookup('dotenv', 'RSSAPP_APIFY_TOKEN', file='../../../../.env.production') }}"
ORIGIN: "{{ lookup('dotenv', 'RSSAPP_ORIGIN', file='../../../../.env.production') }}"
WHITELIST: "{{ lookup('dotenv', 'RSSAPP_WHITELIST', file='../../../../.env.production') }}"
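Once the service converges, a quick check from a manager node might look like this (illustrative; it assumes the rssapp container serves /health, as its healthcheck implies):

docker service ps rssapp                 # task should reach the Running state
curl -fsS http://127.0.0.1:9096/health   # ingress port 9096 forwards to container port 3000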

View File

@@ -1,3 +1,2 @@
---
swarm_enable_manager: false
swarm_enable_worker: false

View File

@@ -0,0 +1,13 @@
---
- name: Allow Docker Swarm ports on enp8s0
community.general.ufw:
rule: allow
port: "{{ item.port }}"
proto: "{{ item.proto }}"
direction: in
interface: enp8s0
loop:
- { port: 2377, proto: tcp } # Swarm control plane
- { port: 7946, proto: tcp } # Gossip TCP
- { port: 7946, proto: udp } # Gossip UDP
- { port: 4789, proto: udp } # Overlay network VXLAN
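For reference, the loop above is roughly equivalent to running the following on each swarm node (the Ansible module is what actually applies the rules):

ufw allow in on enp8s0 proto tcp from any to any port 2377   # Swarm control plane
ufw allow in on enp8s0 proto tcp from any to any port 7946   # Gossip TCP
ufw allow in on enp8s0 proto udp from any to any port 7946   # Gossip UDP
ufw allow in on enp8s0 proto udp from any to any port 4789   # Overlay network VXLAN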

View File

@@ -1,5 +1,9 @@
---
- name: Configure firewall
ansible.builtin.include_tasks:
file: firewall.yml
- name: Configure swarm manager
ansible.builtin.include_tasks:
file: manager.yml
@@ -8,4 +12,4 @@
- name: Configure swarm worker
ansible.builtin.include_tasks:
file: worker.yml
when: swarm_enable_worker
when: not swarm_enable_manager

View File

@@ -7,7 +7,7 @@
register: swarm_create
- name: Set join tokens as host facts (manager only)
set_fact:
ansible.builtin.set_fact:
swarm_worker_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Worker }}"
swarm_manager_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Manager }}"

View File

@@ -1,19 +1,12 @@
---
- debug:
var: groups['swarm-node']
- name: Get all swarm nodes except the first one
set_fact:
swarm_worker_ips: "{{ groups['swarm-node'][1:] }}"
- name: Debug
ansible.builtin.debug:
var: groups['swarm']
- name: Join worker nodes
community.docker.docker_swarm:
state: join
advertise_addr: "{{ internal_ip }}:4567"
join_token: "{{ hostvars[groups['swarm-node'] | first]['swarm_worker_join_token'] }}"
remote_addrs: swarm_worker_ips
# - name: Join swarm as worker
# community.docker.docker_swarm:
# state: joined
# join_token: "{{ hostvars[groups['swarm-node'] | first].swarm_worker_join_token }}"
# remote_addrs: ["{{ hostvars[groups['swarm-node'] | first].internal_ip }}:2377"]
advertise_addr: "{{ internal_ip }}"
join_token: "{{ hostvars[groups['swarm'] | first]['swarm_worker_join_token'] }}"
remote_addrs:
- "{{ hostvars[groups['swarm'] | first]['internal_ip'] }}:2377"

View File

@@ -9,7 +9,7 @@
- bootstrap
- name: Assert common dependencies
hosts: swarm-node
hosts: all
gather_facts: true
check_mode: false
become: true
@@ -17,21 +17,24 @@
- common
- docker
# the decision of worker vs manager is set in ansible inventory by opentofu
- name: Set up docker swarm
hosts: swarm-node
hosts: swarm
gather_facts: true
roles:
- swarm
vars:
swarm_enable_manager: "{{ inventory_hostname == groups['swarm'][0] }}"
- name: Assert our dependencies
hosts: swarm-node
- name: Set up swarm apps
hosts: swarm
gather_facts: true
check_mode: false
become: true
vars_files:
- vars/main.yml
roles:
- our
- rssapp
- name: Configure load balancer
hosts: loadbalancer
@@ -68,13 +71,6 @@
- ipfs
# - name: Set up our app
# hosts: swarm-node
# gather_facts: true
# roles:
# - our
# tags:
# - our
# - name: Install Capture instance
# hosts: capture

View File

@@ -5,7 +5,7 @@
# infisical_url: "{{ lookup('dotenv', 'INFISICAL_URL', file='../.env' )}}"
# infisical_env_slug: prod
# infisical_secrets: >-
# {{
# {{
# lookup(
# 'infisical.vault.read_secrets',
# universal_auth_client_id=infisical_client_id,
@@ -23,6 +23,5 @@ s3_region: us-west-000
s3_endpoint: https://s3.us-west-000.backblazeb2.com
kubo_version: v0.34.1
our_server_port: 3000
our_published_port: 8086
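# our_server_port is the port the app listens on inside the container (compose "target");
# our_published_port is the Swarm ingress port that Caddy proxies to (compose "published").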

View File

@@ -1,7 +0,0 @@
services:
server:
ports:
- target: 5000
published: 8087
protocol: tcp
mode: ingress

View File

@@ -1,13 +0,0 @@
services:
# pgadmin:
# image: dpage/pgadmin4:latest
# ports:
# - target: 5050
# published: 8095
# protocol: tcp
# mode: ingress
networks:
default:
external: true
name: spooky

View File

@@ -1,7 +0,0 @@
services:
server:
ports:
- target: 5000 # container port
published: 8086 # Swarm ingress port
protocol: tcp
mode: ingress

View File

@@ -7,10 +7,16 @@ services:
volumes:
- /mnt/vfs/futureporn:/mnt/vfs/futureporn
ports:
- target: 5000 # container port
published: 8086 # Swarm ingress port
- target: 3000
published: 8086
protocol: tcp
mode: ingress
healthcheck:
test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/health"]
interval: 10s
retries: 5
start_period: 10s
timeout: 10s
worker:
image: gitea.futureporn.net/futureporn/our:latest
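With the app now listening on 3000 inside the container (matching the healthcheck URL) while Swarm keeps publishing 8086 on ingress, a spot check from any swarm node could be the following (service name assumed to be our_server, i.e. <stack>_<service> for the stack named "our"):

docker service ps our_server           # tasks should be Running and report healthy
curl -i http://127.0.0.1:8086/health   # ingress 8086 -> container 3000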

View File

@@ -101,17 +101,17 @@ resource "vultr_vpc" "futureporn_vpc" {
region = "ord"
}
# resource "bunnynet_dns_record" "future_porn_a" {
# for_each = zipmap(
# range(length(vultr_instance.our_vps)),
# vultr_instance.our_vps
# )
resource "bunnynet_dns_record" "future_porn_a" {
for_each = zipmap(
range(length(vultr_instance.loadbalancer)),
vultr_instance.loadbalancer
)
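# zipmap(range(n), list) builds an { index => instance } map, which for_each requires (it cannot iterate a plain list)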
# zone = bunnynet_dns_zone.future_porn.id
# name = "*"
# type = "A"
# value = each.value.main_ip
# }
zone = bunnynet_dns_zone.future_porn.id
name = "*"
type = "A"
value = each.value.main_ip
}
@@ -121,52 +121,34 @@ resource "bunnynet_dns_zone" "future_porn" {
# load balancing instance
# resource "vultr_instance" "loadbalancer" {
# count = 1
# hostname = "fp-lb-${count.index}"
# plan = "vc2-1c-2gb"
# region = "ord"
# backups = "disabled"
# ddos_protection = "false"
# os_id = 1743
# enable_ipv6 = true
# label = "fp lb ${count.index}"
# tags = ["futureporn", "loadbalancer", "our", "tofu"]
# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
# user_data = base64encode(var.vps_user_data)
# vpc_ids = [
# vultr_vpc.futureporn_vpc.id
# ]
# reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id
# }
# our0
# resource "vultr_instance" "our_vps" {
# count = 1
# hostname = "fp-our-${count.index}"
# plan = "vc2-4c-8gb"
# region = "ord"
# backups = "disabled"
# ddos_protection = "false"
# os_id = 1743
# enable_ipv6 = true
# label = "fp our ${count.index}"
# tags = ["futureporn", "our", "tofu"]
# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
# user_data = base64encode(var.vps_user_data)
# vpc_ids = [
# vultr_vpc.futureporn_vpc.id
# ]
# }
resource "vultr_instance" "loadbalancer" {
count = 1
hostname = "fp-lb-${count.index}"
plan = "vc2-1c-2gb"
region = "ord"
backups = "disabled"
ddos_protection = "false"
os_id = 1743
enable_ipv6 = true
label = "fp lb ${count.index}"
tags = ["futureporn", "loadbalancer", "our", "tofu"]
ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
user_data = base64encode(var.vps_user_data)
vpc_ids = [
vultr_vpc.futureporn_vpc.id
]
reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id
}
# resource "bunnynet_dns_record" "future_porn_apex" {
# zone = bunnynet_dns_zone.future_porn.id
# name = ""
# type = "A"
# value = vultr_instance.loadbalancer[0].main_ip
# ttl = 3600
# }
resource "bunnynet_dns_record" "future_porn_apex" {
zone = bunnynet_dns_zone.future_porn.id
name = ""
type = "A"
value = vultr_instance.loadbalancer[0].main_ip
ttl = 3600
}
resource "bunnynet_dns_record" "www_future_porn" {
@@ -192,7 +174,7 @@ resource "vultr_instance" "swarm_node" {
os_id = 1743
enable_ipv6 = true
label = "swarm node ${count.index}"
tags = ["fp", "our", "server", "tofu"]
tags = ["fp", "swarm", "server", "tofu"]
ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
vpc_ids = [
vultr_vpc.futureporn_vpc.id
@@ -296,23 +278,23 @@ resource "vultr_block_storage" "ipfs_blockstorage" {
# }
# }
# resource "ansible_host" "loadbalancer" {
# count = length(vultr_instance.loadbalancer)
resource "ansible_host" "loadbalancer" {
count = length(vultr_instance.loadbalancer)
# name = vultr_instance.loadbalancer[count.index].hostname
# groups = ["loadbalancer"]
# variables = {
# ansible_host = vultr_instance.loadbalancer[count.index].main_ip
# internal_ip = vultr_instance.loadbalancer[count.index].internal_ip
# }
# }
name = vultr_instance.loadbalancer[count.index].hostname
groups = ["loadbalancer"]
variables = {
ansible_host = vultr_instance.loadbalancer[count.index].main_ip
internal_ip = vultr_instance.loadbalancer[count.index].internal_ip
}
}
resource "ansible_host" "swarm_node" {
for_each = { for idx, host in vultr_instance.swarm_node : idx => host }
name = each.value.hostname
groups = ["swarm-node"]
groups = ["swarm"]
variables = {
ansible_host = each.value.main_ip
internal_ip = each.value.internal_ip
@@ -337,18 +319,18 @@ resource "ansible_host" "swarm_node" {
# }
# }
# resource "ansible_host" "our" {
# for_each = { for idx, host in vultr_instance.our_vps : idx => host }
# name = each.value.hostname
# groups = ["our"]
resource "ansible_host" "swarm" {
for_each = { for idx, host in vultr_instance.swarm_node : idx => host }
name = each.value.hostname
groups = ["swarm"]
# variables = {
# ansible_host = each.value.main_ip
# internal_ip = each.value.internal_ip
# vultr_instance_id = each.value.id
# vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id
# }
# }
variables = {
ansible_host = each.value.main_ip
internal_ip = each.value.internal_ip
vultr_instance_id = each.value.id
vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id
}
}
resource "vultr_virtual_file_system_storage" "vfs" {
label = "fp-vfs-cache"
@@ -366,8 +348,8 @@ resource "vultr_virtual_file_system_storage" "vfs" {
# }
resource "ansible_group" "swarm-node" {
name = "swarm-node"
resource "ansible_group" "swarm" {
name = "swarm"
}
@@ -376,9 +358,9 @@ resource "ansible_group" "our" {
}
# resource "ansible_group" "loadbalancer" {
# name = "loadbalancer"
# }
resource "ansible_group" "loadbalancer" {
name = "loadbalancer"
}
resource "ansible_group" "ipfs" {
name = "ipfs"
@@ -387,10 +369,9 @@ resource "ansible_group" "ipfs" {
resource "ansible_group" "futureporn" {
name = "futureporn"
children = [
# "loadbalancer",
"loadbalancer",
"capture",
"swarm-node",
"our",
"swarm",
"ipfs"
]
}