// fp/terraform/main.tf
// load secrets from file
// @see https://stackoverflow.com/a/67653301/1004931
// @see https://grep.app/search?q=for+tuple+in+regexall%28
// @see https://github.com/lrmendess/open-source-datalake/blob/main/minio.tf
locals {
  envs = { for tuple in regexall("(.*)=(.*)", file("../.env.production")) : tuple[0] => sensitive(tuple[1]) }
}
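// Illustrative .env.production format expected by the regexall above
// (KEY=value pairs, one per line; values here are placeholders):
//   VULTR_API_KEY=xxxxxxxxxxxxxxxx
//   BUNNY_API_KEY=xxxxxxxxxxxxxxxx
//   VULTR_SSH_KEY_ID=00000000-0000-0000-0000-000000000000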
variable "ipfs_hosts" {
description = "List of IP addresses for IPFS nodes"
type = list(string)
default = ["38.242.193.246"]
}
variable "our_port" {
default = "5000"
}
variable "public_s3_endpoint" {
default = "https://fp-usc.b-cdn.net"
}
variable "site_url" {
default = "https://future.porn"
}
variable "aws_bucket" {
default = "fp-usc"
}
variable "aws_region" {
default = "us-west-000"
}
variable "aws_host" {
default = "s3.us-west-000.backblazeb2.com"
}
variable "vps_user_data" {
# most packages are installed using ansible, but we do use cloud-config
# to install python3, an ansible dependency
default = <<-EOT
#cloud-config
package_update: true
packages:
- python3
- fail2ban
# @see https://gist.github.com/NatElkins/20880368b797470f3bc6926e3563cb26 for more hardening ideas
EOT
}
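# Note: cloud-init only treats user data as cloud-config when it starts with
# the "#cloud-config" header; the instances below pass this variable through
# user_data = base64encode(var.vps_user_data).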
terraform {
  required_providers {
    vultr = {
      source  = "vultr/vultr"
      version = "2.26.0"
    }
    ansible = {
      source  = "ansible/ansible"
      version = "1.2.0"
    }
    bunnynet = {
      source = "BunnyWay/bunnynet"
    }
  }
}
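# Run `tofu init` (or `terraform init`) to install the providers pinned above;
# bunnynet has no version constraint, so init fetches its latest release.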
provider "vultr" {
api_key = local.envs.VULTR_API_KEY
}
provider "bunnynet" {
api_key = local.envs.BUNNY_API_KEY
}
# A reserved IP lets us spin the system down and back up without losing the IP reservation.
resource "vultr_reserved_ip" "futureporn_v2_ip" {
  label   = "futureporn-v2"
  region  = "ord"
  ip_type = "v4"
}
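# Nothing active in this file attaches the reservation: only the commented-out
# loadbalancer instance below references it via reserved_ip_id.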
# resource "vultr_reserved_ip" "futureporn_tracker_ip" {
# label = "futureporn-tracker"
# region = "ord"
# ip_type = "v4"
# }
# Virtual Private Cloud for connecting many VPS together on a private network
# We use this network connection for loadbalancer<->server<->worker<->db comms.
resource "vultr_vpc" "futureporn_vpc" {
description = "Futureporn V2 VPC"
region = "ord"
}
# resource "bunnynet_dns_record" "future_porn_a" {
# for_each = zipmap(
# range(length(vultr_instance.our_vps)),
# vultr_instance.our_vps
# )
# zone = bunnynet_dns_zone.future_porn.id
# name = "*"
# type = "A"
# value = each.value.main_ip
# }
resource "bunnynet_dns_zone" "future_porn" {
domain = "future.porn"
}
# load balancing instance
# resource "vultr_instance" "loadbalancer" {
# count = 1
# hostname = "fp-lb-${count.index}"
# plan = "vc2-1c-2gb"
# region = "ord"
# backups = "disabled"
# ddos_protection = "false"
# os_id = 1743
# enable_ipv6 = true
# label = "fp lb ${count.index}"
# tags = ["futureporn", "loadbalancer", "our", "tofu"]
# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
# user_data = base64encode(var.vps_user_data)
# vpc_ids = [
# vultr_vpc.futureporn_vpc.id
# ]
# reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id
# }
# our0
# resource "vultr_instance" "our_vps" {
# count = 1
# hostname = "fp-our-${count.index}"
# plan = "vc2-4c-8gb"
# region = "ord"
# backups = "disabled"
# ddos_protection = "false"
# os_id = 1743
# enable_ipv6 = true
# label = "fp our ${count.index}"
# tags = ["futureporn", "our", "tofu"]
# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
# user_data = base64encode(var.vps_user_data)
# vpc_ids = [
# vultr_vpc.futureporn_vpc.id
# ]
# }
# resource "bunnynet_dns_record" "future_porn_apex" {
# zone = bunnynet_dns_zone.future_porn.id
# name = ""
# type = "A"
# value = vultr_instance.loadbalancer[0].main_ip
# ttl = 3600
# }
resource "bunnynet_dns_record" "www_future_porn" {
zone = bunnynet_dns_zone.future_porn.id
name = "www"
type = "CNAME"
value = "future.porn"
ttl = 3600
}
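# www resolves via CNAME to the apex; the apex A record itself is the
# commented-out future_porn_apex resource above, which would point at the
# (also commented-out) loadbalancer instance.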
# vultr instances for running our app (Docker Swarm nodes)
resource "vultr_instance" "swarm_node" {
  count           = 2
  hostname        = "swarm-node-${count.index}"
  plan            = "vc2-2c-4gb"
  region          = "ord"
  backups         = "disabled"
  ddos_protection = "false"
  os_id           = 1743
  enable_ipv6     = true
  label           = "swarm node ${count.index}"
  tags            = ["fp", "our", "server", "tofu"]
  ssh_key_ids     = [local.envs.VULTR_SSH_KEY_ID]
  vpc_ids = [
    vultr_vpc.futureporn_vpc.id
  ]
  user_data = base64encode(var.vps_user_data)
}
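# These two instances are joined into a Docker Swarm by Ansible; the
# ansible_host "swarm_node" entries further down mark which node should act
# as the manager and which should join as a worker.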
# # vultr instance meant for capturing VODs
# resource "vultr_instance" "capture_vps" {
# count = 0
# hostname = "fp-cap-${count.index}"
# plan = "vc2-2c-2gb"
# region = "ord"
# backups = "disabled"
# ddos_protection = "false"
# os_id = 1743
# enable_ipv6 = true
# vpc_ids = [vultr_vpc.futureporn_vpc.id]
# label = "fp capture ${count.index}"
# tags = ["futureporn", "capture"]
# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
# user_data = base64encode(var.vps_user_data)
# }
# vultr instance with a GPU. experimental.
# resource "vultr_instance" "capture_vps" {
# count = 0
# hostname = "fp-cap-${count.index}"
# plan = "vcg-a16-2c-8g-2vram"
# region = "ord"
# backups = "disabled"
# ddos_protection = "false"
# # os_id = 1743
# image_id = "ubuntu-xfce"
# app_variables = {
# desktopuser = "cj_clippy"
# }
# enable_ipv6 = true
# vpc_ids = [vultr_vpc.futureporn_vpc.id]
# label = "fp capture ${count.index}"
# tags = ["futureporn", "capture"]
# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
# user_data = base64encode(var.vps_user_data)
# }
# This is our ipfs node with a really big dick, I mean disk
resource "vultr_instance" "ipfs_vps" {
count = 1
hostname = "fp-ipfs-${count.index}"
plan = "vc2-2c-2gb"
region = "ord"
backups = "disabled"
ddos_protection = "false"
os_id = 1743
enable_ipv6 = true
label = "fp ipfs ${count.index}"
tags = ["futureporn", "ipfs", "tofu"]
ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
user_data = base64encode(var.vps_user_data)
}
resource "ansible_host" "ipfs_vps" {
for_each = { for idx, host in vultr_instance.ipfs_vps : idx => host }
name = each.value.main_ip # <-- pick the IP or hostname here
groups = ["ipfs"]
variables = {
ansible_user = "root"
ansible_host = each.value.main_ip # <-- pick the IP here too
}
}
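# The ansible_host / ansible_group resources in this file publish an inventory
# into Terraform state. Ansible can read it with the cloud.terraform
# collection's terraform_provider inventory plugin; a minimal inventory file
# (illustrative only; filename, path, and playbook name are assumptions) looks
# roughly like:
#
#   # inventory.yml
#   plugin: cloud.terraform.terraform_provider
#   project_path: ./terraform
#
#   ansible-playbook -i inventory.yml site.yml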
resource "vultr_block_storage" "ipfs_blockstorage" {
label = "fp-ipfs"
size_gb = 5000
region = "ord"
attached_to_instance = vultr_instance.ipfs_vps[0].id
}
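# Attaching block storage only exposes a raw device on the instance; formatting
# and mounting it (presumably for the IPFS datastore) is left to Ansible rather
# than this file.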
# resource "ansible_host" "capture_vps" {
# for_each = { for idx, host in vultr_instance.capture_vps : idx => host }
# name = each.value.hostname
# groups = ["capture"] # Groups this host is part of.
# variables = {
# # Connection vars.
# ansible_user = "root"
# ansible_host = each.value.main_ip
# # Custom vars that we might use in roles/tasks.
# # hostname = "web1"
# # fqdn = "web1.example.com"
# }
# }
# resource "ansible_host" "loadbalancer" {
# count = length(vultr_instance.loadbalancer)
# name = vultr_instance.loadbalancer[count.index].hostname
# groups = ["loadbalancer"]
# variables = {
# ansible_host = vultr_instance.loadbalancer[count.index].main_ip
# internal_ip = vultr_instance.loadbalancer[count.index].internal_ip
# }
# }
resource "ansible_host" "swarm_node" {
for_each = { for idx, host in vultr_instance.swarm_node : idx => host }
name = each.value.hostname
groups = ["swarm-node"]
variables = {
ansible_host = each.value.main_ip
internal_ip = each.value.internal_ip
vultr_instance_id = each.value.id
vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id
# Set swarm manager only on the 0th host
swarm_enable_manager = each.key == 0 ? true : false
swarm_enable_worker = each.key == 0 ? false : true
}
}
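# Downstream, the swarm role presumably keys off these flags: the host with
# index "0" runs `docker swarm init` while the others `docker swarm join`;
# that logic lives in the Ansible playbooks, not in this file.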
# resource "ansible_host" "tracker" {
# for_each = { for idx, host in vultr_instance.tracker : idx => host }
# name = each.value.hostname
# groups = ["tracker"]
# variables = {
# ansible_host = each.value.main_ip
# internal_ip = each.value.internal_ip
# vultr_instance_id = each.value.id
# }
# }
# resource "ansible_host" "our" {
# for_each = { for idx, host in vultr_instance.our_vps : idx => host }
# name = each.value.hostname
# groups = ["our"]
# variables = {
# ansible_host = each.value.main_ip
# internal_ip = each.value.internal_ip
# vultr_instance_id = each.value.id
# vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id
# }
# }
resource "vultr_virtual_file_system_storage" "vfs" {
label = "fp-vfs-cache"
size_gb = 200
region = "ord"
tags = ["our", "vfs"]
attached_instances = vultr_instance.swarm_node[*].id
}
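# Unlike vultr_block_storage, which attaches to a single instance, Vultr's
# virtual file system storage can be mounted by multiple instances at once,
# letting both swarm nodes share this cache volume.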
# resource "ansible_group" "capture" {
# name = "capture"
# }
resource "ansible_group" "swarm-node" {
name = "swarm-node"
}
resource "ansible_group" "our" {
name = "our"
}
# resource "ansible_group" "loadbalancer" {
# name = "loadbalancer"
# }
resource "ansible_group" "ipfs" {
name = "ipfs"
}
resource "ansible_group" "futureporn" {
name = "futureporn"
children = [
# "loadbalancer",
"capture",
"swarm-node",
"our",
"ipfs"
]
}
# user_data = base64encode(<<-EOT
# #cloud-config
# package_update: true
# packages:
# - git
# - mosh
# - mg
# - screen
# - tree
# - ncdu
# - pipx
# - ffmpeg
# - fd-find
# - npm
# runcmd:
# - git clone https://github.com/insanity54/dotfiles /root/dotfiles
# - cp /root/dotfiles/.screenrc /root/
# - curl -fsSL https://getcroc.schollz.com | bash
# - curl -fsSL get.docker.com | bash
# - ufw allow 60000:61000/udp
# - pipx install yt-dlp
# - pipx ensurepath
# - git clone https://github.com/insanity54/voddo /root/voddo
# - curl -fsSL https://gitea.futureporn.net/futureporn/fp/raw/branch/main/packages/scripts/thumbnail-generator.sh > ~/.local/bin/thumbnail-generator.sh
# - chmod +x ~/.local/bin/thumbnail-generator.sh
# - curl -fsSL https://github.com/Backblaze/B2_Command_Line_Tool/releases/download/v4.3.1/b2-linux > ~/.local/bin/b2
# - chmod +x ~/.local/bin/b2
# - export DIR=/usr/local/bin; curl https://raw.githubusercontent.com/jesseduffield/lazydocker/master/scripts/install_update_linux.sh | bash
# - curl -fsSL https://dist.ipfs.tech/kubo/v0.33.2/kubo_v0.33.2_linux-amd64.tar.gz > ~/kubo_v0.33.2_linux-amd64.tar.gz
# - tar xvzf ~/kubo_v0.33.2_linux-amd64.tar.gz
# - ~/kubo/install.sh
# - ufw allow 8080
# - ufw allow 8081
# - ufw allow 4001
# EOT
# )