From 8ef71691e0d5f8c3c6cbfba777249bc901d0ce9c Mon Sep 17 00:00:00 2001 From: CJ_Clippy Date: Tue, 7 Oct 2025 04:12:07 -0800 Subject: [PATCH] use container port 3000 --- ansible/roles/ipfs/files/pinall.sh | 83 +++++----- ansible/roles/loadbalancer/handlers/main.yml | 13 ++ ansible/roles/loadbalancer/tasks/main.yml | 8 +- .../roles/loadbalancer/templates/Caddyfile.j2 | 30 ++-- ansible/roles/our/tasks/firewall.yml | 10 ++ ansible/roles/our/tasks/main.yml | 10 +- ansible/roles/our/tasks/stack.yml | 2 + ansible/roles/rssapp/tasks/firewall.yml | 10 ++ ansible/roles/rssapp/tasks/main.yml | 11 ++ ansible/roles/rssapp/tasks/service.yml | 32 ++++ ansible/roles/swarm/defaults/main.yml | 1 - ansible/roles/swarm/tasks/firewall.yml | 13 ++ ansible/roles/swarm/tasks/main.yml | 6 +- ansible/roles/swarm/tasks/manager.yml | 2 +- ansible/roles/swarm/tasks/worker.yml | 21 +-- ansible/site.yml | 22 ++- ansible/vars/main.yml | 5 +- services/our/compose.blue.yaml | 7 - services/our/compose.db.yaml | 13 -- services/our/compose.green.yaml | 7 - services/our/compose.production.yaml | 10 +- terraform/main.tf | 149 ++++++++---------- 22 files changed, 257 insertions(+), 208 deletions(-) create mode 100644 ansible/roles/loadbalancer/handlers/main.yml create mode 100644 ansible/roles/our/tasks/firewall.yml create mode 100644 ansible/roles/rssapp/tasks/firewall.yml create mode 100644 ansible/roles/rssapp/tasks/main.yml create mode 100644 ansible/roles/rssapp/tasks/service.yml create mode 100644 ansible/roles/swarm/tasks/firewall.yml delete mode 100644 services/our/compose.blue.yaml delete mode 100644 services/our/compose.db.yaml delete mode 100644 services/our/compose.green.yaml diff --git a/ansible/roles/ipfs/files/pinall.sh b/ansible/roles/ipfs/files/pinall.sh index 63127a4..22a074e 100644 --- a/ansible/roles/ipfs/files/pinall.sh +++ b/ansible/roles/ipfs/files/pinall.sh @@ -5,34 +5,10 @@ set -euo pipefail # Script: pinall.sh # # Description: -# This script reads a list of S3 object keys (filenames) -# from a file, downloads each file from Backblaze B2, -# adds it to a local IPFS node, and optionally cleans up -# the temporary downloaded file to save disk space. -# -# Usage: -# ./pinall.sh -# Example: -# # sudo -u ipfs env IPFS_PATH=/mnt/blockstorage/ipfs pinall.sh /home/ipfs/filenames.txt -# -# - files.txt should contain one S3 key per line. -# - Lines starting with '#' or empty lines are ignored. -# -# Environment: -# - Requires `b2` CLI configured with B2 credentials. -# - Requires an IPFS node installed and accessible at -# $IPFS_PATH (set in script). -# -# Behavior: -# 1. Reads each key from the input file. -# 2. Downloads the file from B2 to /tmp/. -# 3. Adds the downloaded file to IPFS (CID version 1). -# 4. Deletes the temporary file after adding to IPFS. -# 5. Logs progress with timestamps to stdout. -# -# Exit Codes: -# - 0: All files processed successfully. -# - 1: Incorrect usage or missing input file. +# Downloads all files listed in an input file to a temporary +# directory (only if not already present), then pins them +# all to a local IPFS node. Cleans up temp files only if +# all downloads and pins succeeded. 
# ############################################################ @@ -42,31 +18,56 @@ if [ $# -ne 1 ]; then exit 1 fi +FILELIST=$1 +TMPDIR="/mnt/blockstorage/pinalltmp" + +# Ensure tmp directory exists +mkdir -p "$TMPDIR" + ipfs id echo "Using IPFS_PATH=$IPFS_PATH" -FILELIST=$1 +# Track overall success +ALL_OK=true +# First pass: download files if not already present +echo "[$(date +"%Y-%m-%d %H:%M:%S")] Starting downloads..." while IFS= read -r KEY; do [[ -z "$KEY" || "$KEY" =~ ^# ]] && continue - echo "[$(date +"%Y-%m-%d %H:%M:%S")] Downloading $KEY from B2..." - TMPFILE="/tmp/$KEY" + TMPFILE="$TMPDIR/$KEY" + if [ -f "$TMPFILE" ]; then + echo "[$(date +"%Y-%m-%d %H:%M:%S")] File already exists, skipping: $KEY" + continue + fi + + echo "[$(date +"%Y-%m-%d %H:%M:%S")] Downloading $KEY to $TMPFILE..." if b2 file download "b2://futureporn/$KEY" "$TMPFILE"; then echo "[$(date +"%Y-%m-%d %H:%M:%S")] Download complete: $KEY" else echo "[$(date +"%Y-%m-%d %H:%M:%S")] Download failed: $KEY" - rm -f "$TMPFILE" - continue + ALL_OK=false fi - - echo "[$(date +"%Y-%m-%d %H:%M:%S")] Adding $KEY to IPFS..." - ipfs add --cid-version=1 "$TMPFILE" - - # optional cleanup to save space - rm -f "$TMPFILE" - done < "$FILELIST" -echo "[$(date +"%Y-%m-%d %H:%M:%S")] All tasks complete." +# Second pass: pin all files +echo "[$(date +"%Y-%m-%d %H:%M:%S")] Starting IPFS pinning..." +for FILE in "$TMPDIR"/*; do + [[ ! -f "$FILE" ]] && continue + echo "[$(date +"%Y-%m-%d %H:%M:%S")] Adding $(basename "$FILE") to IPFS..." + if ! ipfs add --cid-version=1 "$FILE"; then + echo "[$(date +"%Y-%m-%d %H:%M:%S")] IPFS add failed for $(basename "$FILE")" + ALL_OK=false + fi +done + +# Cleanup only if all succeeded +if [ "$ALL_OK" = true ]; then + echo "[$(date +"%Y-%m-%d %H:%M:%S")] All downloads and pins succeeded. Cleaning up temporary files..." + rm -rf "$TMPDIR"/* +else + echo "[$(date +"%Y-%m-%d %H:%M:%S")] Some operations failed. Leaving temporary files for inspection." +fi + +echo "[$(date +"%Y-%m-%d %H:%M:%S")] Script finished." 
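Note (not part of the diff): the rewritten pinall.sh header above drops the old Usage example. A minimal invocation sketch, reusing the paths from the removed comment and the new temp directory — IPFS_PATH and the file-list path are illustrative and should match your layout:

    # run as the ipfs user against the blockstorage IPFS repo; filenames.txt holds one B2 key per line
    sudo -u ipfs env IPFS_PATH=/mnt/blockstorage/ipfs ./pinall.sh /home/ipfs/filenames.txt
    # re-running is safe: keys already present under /mnt/blockstorage/pinalltmp are skipped,
    # and temp files are only removed once every download and pin has succeeded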
diff --git a/ansible/roles/loadbalancer/handlers/main.yml b/ansible/roles/loadbalancer/handlers/main.yml new file mode 100644 index 0000000..5d86bdf --- /dev/null +++ b/ansible/roles/loadbalancer/handlers/main.yml @@ -0,0 +1,13 @@ +--- + +- name: Reload caddy + ansible.builtin.systemd_service: + name: caddy + state: reloaded + +- name: Restart caddy + ansible.builtin.systemd_service: + name: caddy + state: restarted + enabled: true + daemon_reload: true diff --git a/ansible/roles/loadbalancer/tasks/main.yml b/ansible/roles/loadbalancer/tasks/main.yml index 064ce5a..3134e21 100644 --- a/ansible/roles/loadbalancer/tasks/main.yml +++ b/ansible/roles/loadbalancer/tasks/main.yml @@ -1,4 +1,9 @@ --- + +- name: Debuggy + debug: + msg: "our_published_port={{ our_published_port }}" + - name: Configure firewall community.general.ufw: rule: allow @@ -11,6 +16,7 @@ - name: Install Caddy ansible.builtin.import_role: name: nvjacobo.caddy + notify: Restart caddy - name: Create html dir ansible.builtin.file: @@ -23,4 +29,4 @@ src: 'templates/Caddyfile.j2' dest: /etc/caddy/Caddyfile mode: "0644" - notify: reload caddy + notify: Reload caddy diff --git a/ansible/roles/loadbalancer/templates/Caddyfile.j2 b/ansible/roles/loadbalancer/templates/Caddyfile.j2 index 354d5cd..5f8df5c 100644 --- a/ansible/roles/loadbalancer/templates/Caddyfile.j2 +++ b/ansible/roles/loadbalancer/templates/Caddyfile.j2 @@ -1,21 +1,21 @@ -{% set sites = ['future.porn', 'pgadmin.sbtp.xyz', 'rssapp.sbtp.xyz'] %} +{% set sites = { + 'future.porn': our_published_port, + 'pgadmin.sbtp.xyz': 9095, + 'rssapp.sbtp.xyz': 9096 +} %} -{% for site in sites %} +{% for site, port in sites.items() %} {{ site }} { - # Define the upstream servers (docker swarm nodes) for load balancing - reverse_proxy {% for host in groups['our'] %}{{ hostvars[host]['internal_ip'] }}:{{ our_server_port }} {% endfor %} { - # Load balancing policy (optional, defaults to "random") - lb_policy least_connections + # Define the upstream servers (docker swarm nodes) for load balancing + reverse_proxy {% for host in groups['swarm'] %}{{ hostvars[host]['internal_ip'] }}:{{ port }} {% endfor %} { + health_uri /health + health_interval 10s + health_timeout 5s + } - # Health checks - health_uri /health - health_interval 10s - health_timeout 5s - } - - handle_errors { - respond "💥 Error; Please try again later. Code {err.status_code} | {err.status_text}." - } + handle_errors { + respond "💥 Error; Please try again later. Code {err.status_code} | {err.status_text}. 
Our code monkeys have been deployed to fix the issue~" + } } {% endfor %} diff --git a/ansible/roles/our/tasks/firewall.yml b/ansible/roles/our/tasks/firewall.yml new file mode 100644 index 0000000..d2ae72d --- /dev/null +++ b/ansible/roles/our/tasks/firewall.yml @@ -0,0 +1,10 @@ +--- +- name: Allow Docker Swarm ports on enp8s0 + community.general.ufw: + rule: allow + port: "{{ item.port }}" + proto: "{{ item.proto }}" + direction: in + interface: enp8s0 + loop: + - { port: "{{ our_published_port }}", proto: tcp } # our server diff --git a/ansible/roles/our/tasks/main.yml b/ansible/roles/our/tasks/main.yml index 7ef34b2..4f392c4 100644 --- a/ansible/roles/our/tasks/main.yml +++ b/ansible/roles/our/tasks/main.yml @@ -1,13 +1,13 @@ --- +- name: Configure firewall + ansible.builtin.include_tasks: + file: firewall.yml + - name: Configure filesystem ansible.builtin.include_tasks: file: filesystem.yml -- name: Configure docker stack - ansible.builtin.include_tasks: - file: stack.yml - -- name: Deploy our via docker stack +- name: Configure docker stack app ansible.builtin.include_tasks: file: stack.yml diff --git a/ansible/roles/our/tasks/stack.yml b/ansible/roles/our/tasks/stack.yml index 6465ce9..7d34579 100644 --- a/ansible/roles/our/tasks/stack.yml +++ b/ansible/roles/our/tasks/stack.yml @@ -11,6 +11,7 @@ scope: swarm state: present driver: overlay + when: inventory_hostname == groups['swarm'] | first - name: Clone the latest code ansible.builtin.git: @@ -31,6 +32,7 @@ - compose.production.yaml - name: Deploy stack + when: inventory_hostname == groups['swarm'] | first community.docker.docker_stack: state: present name: our diff --git a/ansible/roles/rssapp/tasks/firewall.yml b/ansible/roles/rssapp/tasks/firewall.yml new file mode 100644 index 0000000..3a3bda0 --- /dev/null +++ b/ansible/roles/rssapp/tasks/firewall.yml @@ -0,0 +1,10 @@ +--- +- name: Allow Docker Swarm ports on enp8s0 + community.general.ufw: + rule: allow + port: "{{ item.port }}" + proto: "{{ item.proto }}" + direction: in + interface: enp8s0 + loop: + - { port: 9096, proto: tcp } # rssapp diff --git a/ansible/roles/rssapp/tasks/main.yml b/ansible/roles/rssapp/tasks/main.yml new file mode 100644 index 0000000..3676c3c --- /dev/null +++ b/ansible/roles/rssapp/tasks/main.yml @@ -0,0 +1,11 @@ +--- + + +- name: Configure rssapp docker service + ansible.builtin.include_tasks: + file: service.yml + when: inventory_hostname == groups['swarm'] | first + +- name: Configure rssapp firewall + ansible.builtin.include_tasks: + file: firewall.yml diff --git a/ansible/roles/rssapp/tasks/service.yml b/ansible/roles/rssapp/tasks/service.yml new file mode 100644 index 0000000..7f82f87 --- /dev/null +++ b/ansible/roles/rssapp/tasks/service.yml @@ -0,0 +1,32 @@ +--- +- name: Deploy rssapp service to Docker Swarm + community.docker.docker_swarm_service: + name: rssapp + image: gitea.futureporn.net/futureporn/rssapp:latest + + labels: + net.futureporn.service: "rssapp" + + healthcheck: + test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/health"] + interval: 1m + timeout: 10s + retries: 3 + start_period: 15s + + restart_config: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 60s + + publish: + - published_port: 9096 + target_port: 3000 + protocol: tcp + mode: ingress + + env: + APIFY_TOKEN: "{{ lookup('dotenv', 'RSSAPP_APIFY_TOKEN', file='../../../../.env.production') }}" + ORIGIN: "{{ lookup('dotenv', 'RSSAPP_ORIGIN', file='../../../../.env.production') }}" + WHITELIST: "{{ lookup('dotenv', 'RSSAPP_WHITELIST', 
file='../../../../.env.production') }}" diff --git a/ansible/roles/swarm/defaults/main.yml b/ansible/roles/swarm/defaults/main.yml index 5003bb6..85d9180 100644 --- a/ansible/roles/swarm/defaults/main.yml +++ b/ansible/roles/swarm/defaults/main.yml @@ -1,3 +1,2 @@ --- swarm_enable_manager: false -swarm_enable_worker: false diff --git a/ansible/roles/swarm/tasks/firewall.yml b/ansible/roles/swarm/tasks/firewall.yml new file mode 100644 index 0000000..2b29209 --- /dev/null +++ b/ansible/roles/swarm/tasks/firewall.yml @@ -0,0 +1,13 @@ +--- +- name: Allow Docker Swarm ports on enp8s0 + community.general.ufw: + rule: allow + port: "{{ item.port }}" + proto: "{{ item.proto }}" + direction: in + interface: enp8s0 + loop: + - { port: 2377, proto: tcp } # Swarm control plane + - { port: 7946, proto: tcp } # Gossip TCP + - { port: 7946, proto: udp } # Gossip UDP + - { port: 4789, proto: udp } # Overlay network VXLAN diff --git a/ansible/roles/swarm/tasks/main.yml b/ansible/roles/swarm/tasks/main.yml index e419c00..f33c705 100644 --- a/ansible/roles/swarm/tasks/main.yml +++ b/ansible/roles/swarm/tasks/main.yml @@ -1,5 +1,9 @@ --- +- name: Configure firewall + ansible.builtin.include_tasks: + file: firewall.yml + - name: Configure swarm manager ansible.builtin.include_tasks: file: manager.yml @@ -8,4 +12,4 @@ - name: Configure swarm worker ansible.builtin.include_tasks: file: worker.yml - when: swarm_enable_worker + when: not swarm_enable_manager diff --git a/ansible/roles/swarm/tasks/manager.yml b/ansible/roles/swarm/tasks/manager.yml index 4a93906..c5f83f6 100644 --- a/ansible/roles/swarm/tasks/manager.yml +++ b/ansible/roles/swarm/tasks/manager.yml @@ -7,7 +7,7 @@ register: swarm_create - name: Set join tokens as host facts (manager only) - set_fact: + ansible.builtin.set_fact: swarm_worker_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Worker }}" swarm_manager_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Manager }}" diff --git a/ansible/roles/swarm/tasks/worker.yml b/ansible/roles/swarm/tasks/worker.yml index eb87c92..23f438f 100644 --- a/ansible/roles/swarm/tasks/worker.yml +++ b/ansible/roles/swarm/tasks/worker.yml @@ -1,19 +1,12 @@ --- -- debug: - var: groups['swarm-node'] - -- name: Get all swarm nodes except the first one - set_fact: - swarm_worker_ips: "{{ groups['swarm-node'][1:] }}" +- name: Debug + ansible.builtin.debug: + var: groups['swarm'] - name: Join worker nodes community.docker.docker_swarm: state: join - advertise_addr: "{{ internal_ip }}:4567" - join_token: "{{ hostvars[groups['swarm-node'] | first]['swarm_worker_join_token'] }}" - remote_addrs: swarm_worker_ips -# - name: Join swarm as worker -# community.docker.docker_swarm: -# state: joined -# join_token: "{{ hostvars[groups['swarm-node'] | first].swarm_worker_join_token }}" -# remote_addrs: ["{{ hostvars[groups['swarm-node'] | first].internal_ip }}:2377"] + advertise_addr: "{{ internal_ip }}" + join_token: "{{ hostvars[groups['swarm'] | first]['swarm_worker_join_token'] }}" + remote_addrs: + - "{{ hostvars[groups['swarm'] | first]['internal_ip'] }}:2377" diff --git a/ansible/site.yml b/ansible/site.yml index b526050..5e8044d 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -9,7 +9,7 @@ - bootstrap - name: Assert common dependencies - hosts: swarm-node + hosts: all gather_facts: true check_mode: false become: true @@ -17,21 +17,24 @@ - common - docker - -# the decision of worker vs manager is set in ansible inventory by opentofu - name: Set up docker swarm - hosts: swarm-node + hosts: swarm gather_facts: 
true roles: - swarm + vars: + swarm_enable_manager: "{{ inventory_hostname == groups['swarm'][0] }}" -- name: Assert our dependencies - hosts: swarm-node +- name: Setup swarm apps + hosts: swarm gather_facts: true check_mode: false become: true + vars_files: + - vars/main.yml roles: - our + - rssapp - name: Configure load balancer hosts: loadbalancer @@ -68,13 +71,6 @@ - ipfs -# - name: Set up our app -# hosts: swarm-node -# gather_facts: true -# roles: -# - our -# tags: -# - our # - name: Install Capture instance # hosts: capture diff --git a/ansible/vars/main.yml b/ansible/vars/main.yml index 5b25b38..271efb2 100644 --- a/ansible/vars/main.yml +++ b/ansible/vars/main.yml @@ -5,7 +5,7 @@ # infisical_url: "{{ lookup('dotenv', 'INFISICAL_URL', file='../.env' )}}" # infisical_env_slug: prod # infisical_secrets: >- -# {{ +# {{ # lookup( # 'infisical.vault.read_secrets', # universal_auth_client_id=infisical_client_id, @@ -23,6 +23,5 @@ s3_region: us-west-000 s3_endpoint: https://s3.us-west-000.backblazeb2.com kubo_version: v0.34.1 - - our_server_port: 3000 +our_published_port: 8086 diff --git a/services/our/compose.blue.yaml b/services/our/compose.blue.yaml deleted file mode 100644 index 34ead23..0000000 --- a/services/our/compose.blue.yaml +++ /dev/null @@ -1,7 +0,0 @@ -services: - server: - ports: - - target: 5000 - published: 8087 - protocol: tcp - mode: ingress diff --git a/services/our/compose.db.yaml b/services/our/compose.db.yaml deleted file mode 100644 index b68a7a5..0000000 --- a/services/our/compose.db.yaml +++ /dev/null @@ -1,13 +0,0 @@ -services: - # pgadmin: - # image: dpage/pgadmin4:latest - # ports: - # - target: 5050 - # published: 8095 - # protocol: tcp - # mode: ingress - -networks: - default: - external: true - name: spooky diff --git a/services/our/compose.green.yaml b/services/our/compose.green.yaml deleted file mode 100644 index 9421961..0000000 --- a/services/our/compose.green.yaml +++ /dev/null @@ -1,7 +0,0 @@ -services: - server: - ports: - - target: 5000 # container port - published: 8086 # Swarm ingress port - protocol: tcp - mode: ingress diff --git a/services/our/compose.production.yaml b/services/our/compose.production.yaml index 3ac34c6..d80bff7 100644 --- a/services/our/compose.production.yaml +++ b/services/our/compose.production.yaml @@ -7,10 +7,16 @@ services: volumes: - /mnt/vfs/futureporn:/mnt/vfs/futureporn ports: - - target: 5000 # container port - published: 8086 # Swarm ingress port + - target: 3000 + published: 8086 protocol: tcp mode: ingress + healthcheck: + test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/health"] + interval: 10s + retries: 5 + start_period: 10s + timeout: 10s worker: image: gitea.futureporn.net/futureporn/our:latest diff --git a/terraform/main.tf b/terraform/main.tf index ddf1b4b..3848bed 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -101,17 +101,17 @@ resource "vultr_vpc" "futureporn_vpc" { region = "ord" } -# resource "bunnynet_dns_record" "future_porn_a" { -# for_each = zipmap( -# range(length(vultr_instance.our_vps)), -# vultr_instance.our_vps -# ) +resource "bunnynet_dns_record" "future_porn_a" { + for_each = zipmap( + range(length(vultr_instance.loadbalancer)), + vultr_instance.loadbalancer + ) -# zone = bunnynet_dns_zone.future_porn.id -# name = "*" -# type = "A" -# value = each.value.main_ip -# } + zone = bunnynet_dns_zone.future_porn.id + name = "*" + type = "A" + value = each.value.main_ip +} @@ -121,52 +121,34 @@ resource "bunnynet_dns_zone" "future_porn" { # load balancing instance -# resource 
"vultr_instance" "loadbalancer" { -# count = 1 -# hostname = "fp-lb-${count.index}" -# plan = "vc2-1c-2gb" -# region = "ord" -# backups = "disabled" -# ddos_protection = "false" -# os_id = 1743 -# enable_ipv6 = true -# label = "fp lb ${count.index}" -# tags = ["futureporn", "loadbalancer", "our", "tofu"] -# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID] -# user_data = base64encode(var.vps_user_data) -# vpc_ids = [ -# vultr_vpc.futureporn_vpc.id -# ] -# reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id -# } - -# our0 -# resource "vultr_instance" "our_vps" { -# count = 1 -# hostname = "fp-our-${count.index}" -# plan = "vc2-4c-8gb" -# region = "ord" -# backups = "disabled" -# ddos_protection = "false" -# os_id = 1743 -# enable_ipv6 = true -# label = "fp our ${count.index}" -# tags = ["futureporn", "our", "tofu"] -# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID] -# user_data = base64encode(var.vps_user_data) -# vpc_ids = [ -# vultr_vpc.futureporn_vpc.id -# ] -# } +resource "vultr_instance" "loadbalancer" { + count = 1 + hostname = "fp-lb-${count.index}" + plan = "vc2-1c-2gb" + region = "ord" + backups = "disabled" + ddos_protection = "false" + os_id = 1743 + enable_ipv6 = true + label = "fp lb ${count.index}" + tags = ["futureporn", "loadbalancer", "our", "tofu"] + ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID] + user_data = base64encode(var.vps_user_data) + vpc_ids = [ + vultr_vpc.futureporn_vpc.id + ] + reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id +} -# resource "bunnynet_dns_record" "future_porn_apex" { -# zone = bunnynet_dns_zone.future_porn.id -# name = "" -# type = "A" -# value = vultr_instance.loadbalancer[0].main_ip -# ttl = 3600 -# } + +resource "bunnynet_dns_record" "future_porn_apex" { + zone = bunnynet_dns_zone.future_porn.id + name = "" + type = "A" + value = vultr_instance.loadbalancer[0].main_ip + ttl = 3600 +} resource "bunnynet_dns_record" "www_future_porn" { @@ -192,7 +174,7 @@ resource "vultr_instance" "swarm_node" { os_id = 1743 enable_ipv6 = true label = "swarm node ${count.index}" - tags = ["fp", "our", "server", "tofu"] + tags = ["fp", "swarm", "server", "tofu"] ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID] vpc_ids = [ vultr_vpc.futureporn_vpc.id @@ -296,23 +278,23 @@ resource "vultr_block_storage" "ipfs_blockstorage" { # } # } -# resource "ansible_host" "loadbalancer" { -# count = length(vultr_instance.loadbalancer) +resource "ansible_host" "loadbalancer" { + count = length(vultr_instance.loadbalancer) -# name = vultr_instance.loadbalancer[count.index].hostname -# groups = ["loadbalancer"] -# variables = { -# ansible_host = vultr_instance.loadbalancer[count.index].main_ip -# internal_ip = vultr_instance.loadbalancer[count.index].internal_ip -# } -# } + name = vultr_instance.loadbalancer[count.index].hostname + groups = ["loadbalancer"] + variables = { + ansible_host = vultr_instance.loadbalancer[count.index].main_ip + internal_ip = vultr_instance.loadbalancer[count.index].internal_ip + } +} resource "ansible_host" "swarm_node" { for_each = { for idx, host in vultr_instance.swarm_node : idx => host } name = each.value.hostname - groups = ["swarm-node"] + groups = ["swarm"] variables = { ansible_host = each.value.main_ip internal_ip = each.value.internal_ip @@ -337,18 +319,18 @@ resource "ansible_host" "swarm_node" { # } # } -# resource "ansible_host" "our" { -# for_each = { for idx, host in vultr_instance.our_vps : idx => host } -# name = each.value.hostname -# groups = ["our"] +resource "ansible_host" "swarm" { + for_each = { for idx, host in 
vultr_instance.swarm_node : idx => host } + name = each.value.hostname + groups = ["swarm"] -# variables = { -# ansible_host = each.value.main_ip -# internal_ip = each.value.internal_ip -# vultr_instance_id = each.value.id -# vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id -# } -# } + variables = { + ansible_host = each.value.main_ip + internal_ip = each.value.internal_ip + vultr_instance_id = each.value.id + vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id + } +} resource "vultr_virtual_file_system_storage" "vfs" { label = "fp-vfs-cache" @@ -366,8 +348,8 @@ resource "vultr_virtual_file_system_storage" "vfs" { # } -resource "ansible_group" "swarm-node" { - name = "swarm-node" +resource "ansible_group" "swarm" { + name = "swarm" } @@ -376,9 +358,9 @@ resource "ansible_group" "our" { } -# resource "ansible_group" "loadbalancer" { -# name = "loadbalancer" -# } +resource "ansible_group" "loadbalancer" { + name = "loadbalancer" +} resource "ansible_group" "ipfs" { name = "ipfs" @@ -387,10 +369,9 @@ resource "ansible_group" "ipfs" { resource "ansible_group" "futureporn" { name = "futureporn" children = [ - # "loadbalancer", + "loadbalancer", "capture", - "swarm-node", - "our", + "swarm", "ipfs" ] }
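Note (not part of the diff): after deploying, the container-port change can be sanity-checked from a swarm manager. A rough verification sketch — the service name our_server is assumed from the stack name (our) plus the compose service (server), and the /health path and 8086->3000 mapping are taken from compose.production.yaml:

    # confirm the Swarm ingress publishes 8086 -> 3000 as declared in compose.production.yaml
    docker service inspect our_server --format '{{ json .Endpoint.Ports }}'
    # hit the health endpoint that Caddy's upstream checks will probe, via a swarm node's internal IP
    curl -fsS http://<swarm-node-internal-ip>:8086/health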