add rssapp gitea actions builder
Some checks failed
rssapp CI/CD / build (push) Successful in 2m2s
ci / test (push) Failing after 1m5s
ci / build (push) Has been cancelled

CJ_Clippy 2025-09-28 00:55:50 -08:00
parent ed8ee57815
commit cc0f0a33fa
20 changed files with 1702 additions and 220 deletions


@ -0,0 +1,67 @@
name: rssapp CI/CD
on:
push:
branches:
- "main"
paths:
- "services/rssapp/**"
pull_request:
paths:
- "services/rssapp/**"
jobs:
build:
runs-on: ubuntu-latest
environment: docker
steps:
- name: Check out code
uses: actions/checkout@v3
with:
submodules: recursive
- name: Login to Gitea Docker Registry
uses: docker/login-action@v3
with:
registry: gitea.futureporn.net
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Build and Push futureporn/rssapp
uses: docker/build-push-action@v6
with:
context: ./services/rssapp
push: true
tags: gitea.futureporn.net/futureporn/rssapp:latest
build-args: |
NODE_ENV=production
labels: |
org.opencontainers.image.description=X.com RSS service for Futureporn internal tools
org.opencontainers.image.title=rssapp
org.opencontainers.image.licenses=unlicense
org.opencontainers.image.source=https://gitea.futureporn.net/futureporn/fp
org.opencontainers.image.url=https://gitea.futureporn.net/futureporn/-/packages/container/rssapp
- name: Clean up unused docker networks
run: docker network prune --force
# Optional: Trigger Dokploy Deployment
- name: Trigger Dokploy Deployment
if: github.ref == 'refs/heads/main'
run: |
curl -X POST \
-H "Content-Type: application/json" \
-H "x-gitea-event: Push Hook" \
-d "{
\"ref\": \"refs/heads/main\",
\"after\": \"${GITHUB_SHA}\",
\"commits\": [
{
\"id\": \"${GITHUB_SHA}\",
\"message\": \"Deployment from Gitea Actions\",
\"author\": { \"name\": \"${GITHUB_ACTOR}\" }
}
]
}" \
"${{ secrets.WEBHOOK_URL }}"


@ -0,0 +1 @@
.env

services/actor/.gitignore (vendored, new file, 144 lines)

@ -0,0 +1,144 @@
# Created by https://www.toptal.com/developers/gitignore/api/node
# Edit at https://www.toptal.com/developers/gitignore?templates=node
### Node ###
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
### Node Patch ###
# Serverless Webpack directories
.webpack/
# Optional stylelint cache
# SvelteKit build / generate output
.svelte-kit
# End of https://www.toptal.com/developers/gitignore/api/node

services/actor/README.md (new file, 10 lines)

@ -0,0 +1,10 @@
# actor
Spin up a powerful VPS to run act_runner workflows on demand
## vultr config
vhp-8c-16gb
Ubuntu 24.04 LTS x64
Chicago
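
For reference, a minimal sketch (not part of this commit) of how the service is driven once it is running: anything that can POST to the webhook, such as a Gitea workflow, triggers a runner VPS build. It assumes the defaults from services/actor/src/env.ts (WEBHOOK_PORT=3000) and the /webhook route in src/server.ts.

// Hedged sketch: trigger a runner VPS build by POSTing to the actor webhook.
// Assumes the actor service from src/server.ts is running locally on its default port 3000.
const res = await fetch('http://localhost:3000/webhook', { method: 'POST' });
const body = await res.json();
// On success the server replies with { status: 'ok', instance }, where `instance`
// is the JSON returned by the Vultr create-instance call in src/vultr.ts.
console.log(body.status, body.instance);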

services/actor/package-lock.json (generated, new file, 1010 lines)
Diff suppressed because it is too large.

@ -0,0 +1,19 @@
{
"name": "actor",
"version": "0.0.1",
"description": "Spin up a powerful VPS to run act_runner workflows on demand",
"license": "Unlicense",
"author": "",
"type": "module",
"main": "src/server.ts",
"scripts": {
"start": "tsx src/server.ts",
"test": "echo \"Error: no test specified\" && exit 1"
},
"dependencies": {
"@dotenvx/dotenvx": "^1.48.4",
"fastify": "^5.5.0",
"nunjucks": "^3.2.4",
"zod": "^4.0.17"
}
}

services/actor/src/env.ts (new file, 11 lines)

@ -0,0 +1,11 @@
import '@dotenvx/dotenvx/config';
import { z } from 'zod';
export const EnvSchema = z.object({
VULTR_API_KEY: z.string(),
GITEA_RUNNER_REGISTRATION_TOKEN: z.string(),
VPS_SPEC: z.string().default('vhp-8c-16gb'),
WEBHOOK_PORT: z.coerce.number().default(3000),
});
export const env = EnvSchema.parse(process.env);


@ -0,0 +1,21 @@
import Fastify from 'fastify';
import { env } from './env.js';
import { createVultrInstance } from './vultr.js';
const fastify = Fastify();
fastify.post('/webhook', async (request, reply) => {
try {
const instance = await createVultrInstance();
console.log('Created instance:', instance);
return { status: 'ok', instance };
} catch (err: any) {
console.error(err);
reply.status(500).send({ status: 'error', message: err.message });
}
});
fastify.listen({ port: env.WEBHOOK_PORT }, (err, addr) => {
if (err) throw err;
console.log(`Server listening at ${addr}`);
});


@ -0,0 +1,46 @@
import { readFile } from 'fs/promises';
import nunjucks from 'nunjucks';
import { env } from './env.js';
nunjucks.configure({ autoescape: true });
export async function createVultrInstance(): Promise<any> {
// Load cloud-init template
const template = await readFile('./user-data.j2', 'utf-8');
const hostname = `gitea-runner-${Date.now()}`
// Render template with the runner token
const userData = nunjucks.renderString(template, {
GITEA_RUNNER_REGISTRATION_TOKEN: env.GITEA_RUNNER_REGISTRATION_TOKEN,
hostname,
});
const body = {
region: 'ord',
plan: env.VPS_SPEC,
os_id: 2284, // Ubuntu 22.04 x64
user_data: Buffer.from(userData).toString('base64'),
label: hostname,
hostname,
sshkey_id: null,
enable_ipv6: true,
enable_private_network: true,
};
const res = await fetch('https://api.vultr.com/v2/instances', {
method: 'POST',
headers: {
'Authorization': `Bearer ${env.VULTR_API_KEY}`,
'Content-Type': 'application/json',
},
body: JSON.stringify(body),
});
if (!res.ok) {
const text = await res.text();
throw new Error(`Failed to create VPS: ${res.status} ${text}`);
}
return res.json();
}


@ -0,0 +1,51 @@
#cloud-config
package_update: true
packages:
- docker.io
- curl
- jq
write_files:
- path: /etc/act_runner/config.yaml
permissions: '0644'
owner: root:root
content: |
runner:
labels:
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
- "ubuntu-24.04:docker://gitea/runner-images:ubuntu-24.04"
- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
workdir: /var/lib/act_runner
max_parallel: 1
- path: /etc/systemd/system/act_runner.service
permissions: '0644'
owner: root:root
content: |
[Unit]
Description=Gitea Actions act_runner service
After=docker.service
Wants=docker.service
[Service]
Type=simple
ExecStart=/usr/local/bin/act_runner daemon --config /etc/act_runner/config.yaml
WorkingDirectory=/var/lib/act_runner
User=root
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
runcmd:
- mkdir -p /var/lib/act_runner
- curl -L https://gitea.com/gitea/act_runner/releases/download/v0.2.12/act_runner-0.2.12-linux-amd64 -o /usr/local/bin/act_runner
- chmod +x /usr/local/bin/act_runner
- cd /var/lib/act_runner && /usr/local/bin/act_runner register --no-interactive --instance "https://gitea.futureporn.net" --token "{{ GITEA_RUNNER_REGISTRATION_TOKEN }}" --name "{{ hostname }}" --labels "ubuntu-22.04:docker"
- systemctl daemon-reload
- systemctl enable act_runner
- systemctl start act_runner


@ -1,3 +1,5 @@
+experiments
 vibeui
 venv
 src/test


@ -0,0 +1,67 @@
FROM node:22
# Set working directory
WORKDIR /app
# Install system-level dependencies
RUN apt-get update -y && \
apt-get install -y --no-install-recommends \
build-essential \
git \
inotify-tools \
ffmpeg \
mktorrent \
python3 \
python3-pip \
python3-venv \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# Install Shaka Packager
RUN wget -q https://github.com/shaka-project/shaka-packager/releases/download/v3.4.2/packager-linux-x64 \
-O /usr/local/bin/packager \
&& chmod +x /usr/local/bin/packager \
&& packager --version
# Install IPFS Kubo
COPY --from=ipfs/kubo:v0.36.0 /usr/local/bin/ipfs /usr/local/bin/ipfs
RUN ipfs init
# Bundle the vibeui pytorch model
RUN mkdir -p /app/vibeui \
&& wget -q https://gitea.futureporn.net/futureporn/fp/raw/branch/main/apps/vibeui/public/vibeui.pt -O /app/vibeui/vibeui.pt \
&& wget -q https://gitea.futureporn.net/futureporn/fp/raw/branch/main/apps/vibeui/public/data.yaml -O /app/vibeui/data.yaml
# Install openwhisper
COPY --from=ghcr.io/ggml-org/whisper.cpp:main-e7bf0294ec9099b5fc21f5ba969805dfb2108cea /app /app/whisper.cpp
ENV PATH="$PATH:/app/whisper.cpp/build/bin"
ENV LD_LIBRARY_PATH="/app/whisper.cpp/build/src:/app/whisper.cpp/build/ggml/src:/usr/local/lib:/usr/lib"
# Install b2-cli
RUN wget https://github.com/Backblaze/B2_Command_Line_Tool/releases/download/v4.4.1/b2-linux -O /usr/local/bin/b2 && chmod +x /usr/local/bin/b2
# Copy and install dependencies
COPY package.json package-lock.json ./
RUN --mount=type=cache,target=/root/.npm npm install --ignore-scripts=false --foreground-scripts --verbose
# Copy Prisma schema and generate client
COPY prisma ./prisma
RUN npx prisma generate
# Copy the rest of the code
COPY . .
# Build the app
RUN npm run build
# Setup Python virtualenv
RUN python3 -m venv /app/venv
ENV PATH="/app/venv/bin:$PATH"
# Install python deps
RUN ./venv/bin/pip install --no-cache-dir -r requirements.txt
# Expose the port
EXPOSE 5000
# Start the app
CMD ["npm", "run", "start:server"]


@ -1,3 +1,8 @@
+/**
+ * This is an experiment to see if we can analyze the audio of a video
+ */
 import { spawn } from "child_process";
 export interface AudioStats {


@ -1,10 +1,12 @@
-import { WorkerPreset } from "graphile-worker"
+import { LoadTaskFromJsPlugin } from "graphile-worker/dist/plugins/LoadTaskFromJsPlugin.js";
 import { env } from "./src/config/env"
 import path from 'node:path'
 const __dirname = import.meta.dirname;
 const preset: GraphileConfig.Preset = {
-  extends: [WorkerPreset],
+  plugins: [LoadTaskFromJsPlugin], // here we override the WorkerPreset plugins which included the undesirable LoadTaskFromExecutableFilePlugin
   worker: {
     connectionString: env.DATABASE_URL,
     maxPoolSize: 10,
@ -17,7 +19,6 @@ const preset: GraphileConfig.Preset = {
     taskDirectory: path.join(__dirname, 'dist', 'tasks'),
     // to log debug messages, set GRAPHILE_LOGGER_DEBUG=1 in env @see https://worker.graphile.org/docs/library/logger
   },
 };


@ -1,3 +1,49 @@
/**
* Migration Script: V1 → V2 Database
* -----------------------------------
* This script migrates VTuber and VOD data from an old Postgres database (V1) into the new Prisma-backed database (V2).
*
* Usage:
* - Ensure environment variables are configured for both databases:
* V1_DB_HOST Hostname of the V1 database (default: "localhost")
* V1_DB_PORT Port of the V1 database (default: "5444")
* V1_DB_USER Username for the V1 database (default: "postgres")
* V1_DB_PASS Password for the V1 database (default: "password")
* V1_DB_NAME Database name for V1 (default: "restoredb")
* DEFAULT_UPLOADER_ID
* An existing user ID in the V2 database that will be set as the uploader for all migrated records.
*
* What it does:
* 1. Migrates VTubers:
* - Reads all rows from `vtubers` in V1.
* - Inserts each into V2's `vtuber` table using Prisma.
* - Maps all known fields (social links, images, themeColor, etc.).
* - Combines `description_1` and `description_2` into a single `description` field.
* - Assigns the `DEFAULT_UPLOADER_ID` to each migrated VTuber.
*
* 2. Migrates VODs:
* - Reads all rows from `vods` in V1.
* - Resolves associated VTubers via `vods_vtuber_links` → `vtubers.slug`.
* - Finds related thumbnails and source video links via `vods_thumbnail_links` and `vods_video_src_b_2_links`.
* - Inserts each VOD into V2's `vod` table, connecting it to the corresponding VTubers by slug.
* - Assigns the `DEFAULT_UPLOADER_ID` to each migrated VOD.
*
* Notes:
* - This script assumes schema compatibility between V1 and V2 (field names may differ slightly).
* - Thumbnails and video source links fall back to `cdn_url` or `url` if available.
* - Any V1 records with missing or null values are gracefully handled with `null` fallbacks.
* - Run this script once; re-running may cause duplicate records unless unique constraints prevent it.
*
* Execution:
* Run with Node.js:
* $ npx @dotenvx/dotenvx run -f ./.env -- tsx ./migrate.ts
*
* Cleanup:
* - Connections to both the V1 database (pg.Pool) and Prisma client are properly closed at the end of execution.
*/
import { PrismaClient } from '../generated/prisma';
import pg from 'pg';
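
For reference, a condensed sketch (not part of this commit) of the VTuber half of the migration described above. The V1 connection settings mirror the documented environment variables; column names other than description_1/description_2 and the uploader field are assumptions for illustration.

import { PrismaClient } from '../generated/prisma';
import pg from 'pg';

const prisma = new PrismaClient();
const v1 = new pg.Pool({
  host: process.env.V1_DB_HOST ?? 'localhost',
  port: Number(process.env.V1_DB_PORT ?? 5444),
  user: process.env.V1_DB_USER ?? 'postgres',
  password: process.env.V1_DB_PASS ?? 'password',
  database: process.env.V1_DB_NAME ?? 'restoredb',
});

// Read every V1 vtuber row and insert it into V2's `vtuber` table via Prisma.
const { rows } = await v1.query('SELECT * FROM vtubers');
for (const row of rows) {
  await prisma.vtuber.create({
    data: {
      slug: row.slug, // assumed column name
      description: [row.description_1, row.description_2].filter(Boolean).join('\n'),
      uploaderId: process.env.DEFAULT_UPLOADER_ID!, // assumed field name for the uploader relation
    },
  });
}

// Close both connections, as the docblock above describes.
await v1.end();
await prisma.$disconnect();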


@ -12,24 +12,26 @@ window.HELP_IMPROVE_VIDEOJS = false; // disable videojs tracking
 const container = document.querySelector('.video-container')
-const isSupporter = container.dataset.supporter === 'true'
-const player = videojs('player');
-player.ready(() => {
-  if (isSupporter) {
-    // set up plugins
-    const funscripts = collectFunscripts()
-    const funscriptsOptions = {
-      buttplugClientName: "future.porn",
-      debug: false,
-      funscripts,
-    }
-    player.funscriptPlayer(funscriptsOptions);
-  }
-  player.hlsQualitySelector({
-    displayCurrentQuality: true,
-  });
-})
+if (container) {
+  const isSupporter = container.dataset.supporter === 'true'
+  const player = videojs('player');
+  player.ready(() => {
+    if (isSupporter) {
+      // set up plugins
+      const funscripts = collectFunscripts()
+      const funscriptsOptions = {
+        buttplugClientName: "future.porn",
+        debug: false,
+        funscripts,
+      }
+      player.funscriptPlayer(funscriptsOptions);
+    }
+    player.hlsQualitySelector({
+      displayCurrentQuality: true,
+    });
+  })
+}


@ -36,7 +36,8 @@ const EnvSchema = z.object({
   B2_APPLICATION_KEY: z.string(),
   SEEDBOX_SFTP_URL: z.string(),
   SEEDBOX_SFTP_USERNAME: z.string(),
-  SEEDBOX_SFTP_PASSWORD: z.string()
+  SEEDBOX_SFTP_PASSWORD: z.string(),
+  QBT_HOST: z.string().default('localhost'),
 });
 const parsed = EnvSchema.safeParse(process.env);


@ -379,5 +379,4 @@ export class QBittorrentClient {
   }
 }
-const opts = env.NODE_ENV === 'production' ? { host: 'qbittorrent' } : { host: 'localhost' }
-export const qbtClient = new QBittorrentClient(opts);
+export const qbtClient = new QBittorrentClient({ host: env.QBT_HOST });


@ -0,0 +1,7 @@
FROM ultralytics/ultralytics:8.3.203-cpu AS base
COPY --from=google/shaka-packager:v3.4.2 /usr/bin /usr/local/bin
ENTRYPOINT /bin/sh


@ -31,7 +31,7 @@ variable "site_url" {
 }
 variable "aws_bucket" {
-  default = "futureporn"
+  default = "fp-usc"
 }
 variable "aws_region" {
@ -90,11 +90,11 @@ resource "vultr_reserved_ip" "futureporn_v2_ip" {
   ip_type = "v4"
 }
-resource "vultr_reserved_ip" "futureporn_tracker_ip" {
-  label = "futureporn-tracker"
-  region = "ord"
-  ip_type = "v4"
-}
+# resource "vultr_reserved_ip" "futureporn_tracker_ip" {
+# label = "futureporn-tracker"
+# region = "ord"
+# ip_type = "v4"
+# }
 # Virtual Private Cloud for connecting many VPS together on a private network
@ -124,31 +124,31 @@ resource "bunnynet_dns_zone" "future_porn" {
 # load balancing instance
-# resource "vultr_instance" "load_balancer" {
-# count = 1
-# hostname = "fp-lb-${count.index}"
-# plan = "vc2-1c-2gb"
-# region = "ord"
-# backups = "disabled"
-# ddos_protection = "false"
-# os_id = 1743
-# enable_ipv6 = true
-# label = "fp lb ${count.index}"
-# tags = ["futureporn", "load_balancer", "our"]
-# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
-# user_data = base64encode(var.vps_user_data)
-# vpc_ids = [
-# vultr_vpc.futureporn_vpc.id
-# ]
-# reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id
-# }
+resource "vultr_instance" "load_balancer" {
+  count = 1
+  hostname = "fp-lb-${count.index}"
+  plan = "vc2-1c-2gb"
+  region = "ord"
+  backups = "disabled"
+  ddos_protection = "false"
+  os_id = 1743
+  enable_ipv6 = true
+  label = "fp lb ${count.index}"
+  tags = ["futureporn", "load_balancer", "our"]
+  ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
+  user_data = base64encode(var.vps_user_data)
+  vpc_ids = [
+    vultr_vpc.futureporn_vpc.id
+  ]
+  reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id
+}
 resource "bunnynet_dns_record" "future_porn_apex" {
   zone = bunnynet_dns_zone.future_porn.id
   name = ""
   type = "A"
-  value = vultr_instance.our_vps[0].main_ip
+  value = vultr_instance.our_loadbalancer[0].main_ip
   ttl = 3600
 }
@ -166,78 +166,62 @@ resource "bunnynet_dns_record" "www_future_porn" {
 # vultr instance for running our app
-# resource "vultr_instance" "our_server" {
-# count = 1
-# hostname = "fp-our-server-${count.index}"
-# plan = "vc2-2c-4gb"
-# region = "ord"
-# backups = "disabled"
-# ddos_protection = "false"
-# os_id = 1743
-# enable_ipv6 = true
-# label = "fp our server ${count.index}"
-# tags = ["futureporn", "our", "server"]
-# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
-# vpc_ids = [
-# vultr_vpc.futureporn_vpc.id
-# ]
-# user_data = base64encode(var.vps_user_data)
-# }
+resource "vultr_instance" "our_server" {
+  count = 1
+  hostname = "fp-our-server-${count.index}"
+  plan = "vc2-2c-4gb"
+  region = "ord"
+  backups = "disabled"
+  ddos_protection = "false"
+  os_id = 1743
+  enable_ipv6 = true
+  label = "fp our server ${count.index}"
+  tags = ["futureporn", "our", "server"]
+  ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
+  vpc_ids = [
+    vultr_vpc.futureporn_vpc.id
+  ]
+  user_data = base64encode(var.vps_user_data)
+}
 # vultr instance for running our app's background task runners
-# resource "vultr_instance" "our_worker" {
-# count = 1
-# hostname = "fp-our-worker-${count.index}"
-# plan = "vc2-2c-4gb"
-# region = "ord"
-# backups = "disabled"
-# ddos_protection = "false"
-# os_id = 1743
-# enable_ipv6 = true
-# label = "fp our worker ${count.index}"
-# tags = ["futureporn", "our", "worker"]
-# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
-# vpc_ids = [
-# vultr_vpc.futureporn_vpc.id
-# ]
-# user_data = base64encode(var.vps_user_data)
-# }
+resource "vultr_instance" "our_worker" {
+  count = 2
+  hostname = "fp-our-worker-${count.index}"
+  plan = "vc2-2c-4gb"
+  region = "ord"
+  backups = "disabled"
+  ddos_protection = "false"
+  os_id = 1743
+  enable_ipv6 = true
+  label = "fp our worker ${count.index}"
+  tags = ["futureporn", "our", "worker"]
+  ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
+  vpc_ids = [
+    vultr_vpc.futureporn_vpc.id
+  ]
+  user_data = base64encode(var.vps_user_data)
+}
-# vultr instance meant for capturing VODs
-resource "vultr_instance" "capture_vps" {
-  count = 1
-  hostname = "fp-cap-${count.index}"
-  plan = "vc2-2c-2gb"
-  region = "ord"
-  backups = "disabled"
-  ddos_protection = "false"
-  os_id = 1743
-  enable_ipv6 = true
-  vpc_ids = [vultr_vpc.futureporn_vpc.id]
-  label = "fp capture ${count.index}"
-  tags = ["futureporn", "capture"]
-  ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
-  user_data = base64encode(var.vps_user_data)
-}
-# vultr instance meant for running our future.porn app
-resource "vultr_instance" "our_vps" {
-  count = 1
-  hostname = "fp-our-${count.index}"
-  plan = "vc2-2c-2gb"
-  region = "ord"
-  backups = "disabled"
-  ddos_protection = "false"
-  os_id = 1743
-  enable_ipv6 = true
-  vpc_ids = [vultr_vpc.futureporn_vpc.id]
-  label = "fp our ${count.index}"
-  tags = ["futureporn", "capture"]
-  ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
-  user_data = base64encode(var.vps_user_data)
-}
+# # vultr instance meant for capturing VODs
+# resource "vultr_instance" "capture_vps" {
+# count = 0
+# hostname = "fp-cap-${count.index}"
+# plan = "vc2-2c-2gb"
+# region = "ord"
+# backups = "disabled"
+# ddos_protection = "false"
+# os_id = 1743
+# enable_ipv6 = true
+# vpc_ids = [vultr_vpc.futureporn_vpc.id]
+# label = "fp capture ${count.index}"
+# tags = ["futureporn", "capture"]
+# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
+# user_data = base64encode(var.vps_user_data)
+# }
 # vultr instance with a GPU. experimental.
 # resource "vultr_instance" "capture_vps" {
@ -261,54 +245,61 @@
 # }
-# resource "vultr_instance" "database" {
-# count = 1
-# hostname = "fp-db-${count.index}"
-# plan = "vc2-1c-2gb"
-# region = "ord"
-# backups = "enabled"
-# backups_schedule {
-# hour = "2"
-# type = "daily"
-# }
-# ddos_protection = "false"
-# os_id = 1743
-# enable_ipv6 = true
-# vpc_ids = [vultr_vpc.futureporn_vpc.id]
-# label = "fp database ${count.index}"
-# tags = ["futureporn", "database"]
-# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
-# user_data = base64encode(var.vps_user_data)
-# }
+resource "vultr_instance" "database" {
+  count = 1
+  hostname = "fp-db-${count.index}"
+  plan = "vc2-1c-2gb"
+  region = "ord"
+  backups = "enabled"
+  backups_schedule {
+    hour = "2"
+    type = "daily"
+  }
+  ddos_protection = "false"
+  os_id = 1743
+  enable_ipv6 = true
+  vpc_ids = [vultr_vpc.futureporn_vpc.id]
+  label = "fp database ${count.index}"
+  tags = ["futureporn", "database"]
+  ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
+  user_data = base64encode(var.vps_user_data)
+}
-resource "vultr_instance" "tracker" {
-  count = 0
-  hostname = "fp-tracker-${count.index}"
-  plan = "vc2-1c-2gb"
-  region = "ord"
-  backups = "disabled"
-  ddos_protection = "false"
-  os_id = 1743
-  enable_ipv6 = true
-  vpc_ids = [vultr_vpc.futureporn_vpc.id]
-  label = "fp tracker ${count.index}"
-  tags = ["futureporn", "tracker"]
-  ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
-  user_data = base64encode(var.vps_user_data)
-  reserved_ip_id = vultr_reserved_ip.futureporn_tracker_ip.id
-}
+# backups = "enabled"
+# backups_schedule {
+# hour = "2"
+# type = "daily"
+# }
+# resource "vultr_instance" "tracker" {
+# count = 0
+# hostname = "fp-tracker-${count.index}"
+# plan = "vc2-1c-2gb"
+# region = "ord"
+# backups = "disabled"
+# ddos_protection = "false"
+# os_id = 1743
+# enable_ipv6 = true
+# vpc_ids = [vultr_vpc.futureporn_vpc.id]
+# label = "fp tracker ${count.index}"
+# tags = ["futureporn", "tracker"]
+# ssh_key_ids = [local.envs.VULTR_SSH_KEY_ID]
+# user_data = base64encode(var.vps_user_data)
+# reserved_ip_id = vultr_reserved_ip.futureporn_tracker_ip.id
+# }
-# resource "ansible_host" "ipfs_vps" {
-# for_each = { for idx, host in var.ipfs_hosts : idx => host }
-# name = each.value
-# groups = ["ipfs"]
-# variables = {
-# ansible_user = "root"
-# ansible_host = each.value
-# }
-# }
+resource "ansible_host" "ipfs_vps" {
+  for_each = { for idx, host in var.ipfs_hosts : idx => host }
+  name = each.value
+  groups = ["ipfs"]
+  variables = {
+    ansible_user = "root"
+    ansible_host = each.value
+  }
+}
 # resource "ansible_host" "capture_vps" {
 # for_each = { for idx, host in vultr_instance.capture_vps : idx => host }
@ -326,48 +317,48 @@ resource "vultr_instance" "tracker" {
 # }
 # }
-# resource "ansible_host" "load_balancer" {
-# for_each = { for idx, host in vultr_instance.load_balancer : idx => host }
-# name = each.value.hostname
-# groups = ["load_balancer"]
-# variables = {
-# ansible_host = each.value.main_ip
-# internal_ip = each.value.internal_ip
-# }
-# }
+resource "ansible_host" "load_balancer" {
+  for_each = { for idx, host in vultr_instance.load_balancer : idx => host }
+  name = each.value.hostname
+  groups = ["load_balancer"]
+  variables = {
+    ansible_host = each.value.main_ip
+    internal_ip = each.value.internal_ip
+  }
+}
-# resource "ansible_host" "database" {
-# for_each = { for idx, host in vultr_instance.database : idx => host }
-# name = each.value.hostname
-# groups = ["database"]
-# variables = {
-# ansible_host = each.value.main_ip
-# internal_ip = each.value.internal_ip
-# }
-# }
+resource "ansible_host" "database" {
+  for_each = { for idx, host in vultr_instance.database : idx => host }
+  name = each.value.hostname
+  groups = ["database"]
+  variables = {
+    ansible_host = each.value.main_ip
+    internal_ip = each.value.internal_ip
+  }
+}
-# resource "ansible_host" "our_server" {
-# for_each = { for idx, host in vultr_instance.our_server : idx => host }
-# name = each.value.hostname
-# groups = ["our-server"]
-# variables = {
-# ansible_host = each.value.main_ip
-# internal_ip = each.value.internal_ip
-# vultr_instance_id = each.value.id
-# }
-# }
+resource "ansible_host" "our_server" {
+  for_each = { for idx, host in vultr_instance.our_server : idx => host }
+  name = each.value.hostname
+  groups = ["our-server"]
+  variables = {
+    ansible_host = each.value.main_ip
+    internal_ip = each.value.internal_ip
+    vultr_instance_id = each.value.id
+  }
+}
-# resource "ansible_host" "our_worker" {
-# for_each = { for idx, host in vultr_instance.our_worker : idx => host }
-# name = each.value.hostname
-# groups = ["our-worker"]
-# variables = {
-# ansible_host = each.value.main_ip
-# internal_ip = each.value.internal_ip
-# vultr_instance_id = each.value.id
-# }
-# }
+resource "ansible_host" "our_worker" {
+  for_each = { for idx, host in vultr_instance.our_worker : idx => host }
+  name = each.value.hostname
+  groups = ["our-worker"]
+  variables = {
+    ansible_host = each.value.main_ip
+    internal_ip = each.value.internal_ip
+    vultr_instance_id = each.value.id
+  }
+}
 # resource "ansible_host" "tracker" {
@ -402,61 +393,42 @@ resource "vultr_virtual_file_system_storage" "vfs" {
 }
-# resource "ansible_host" "periphery" {
-# for_each = { for idx, host in vultr_instance.our_vps : idx => host }
-# name = each.value.hostname
-# groups = ["periphery"]
-# variables = {
-# ansible_host = each.value.main_ip
-# internal_ip = each.value.internal_ip
-# vultr_instance_id = each.value.id
-# }
-# }
 # resource "ansible_group" "capture" {
 # name = "capture"
 # }
-# resource "ansible_group" "our-server" {
-# name = "our-server"
-# }
+resource "ansible_group" "our-server" {
+  name = "our-server"
+}
-# resource "ansible_group" "our-worker" {
-# name = "our-worker"
-# }
+resource "ansible_group" "our-worker" {
+  name = "our-worker"
+}
-# resource "ansible_group" "tracker" {
-# name = "tracker"
-# }
 resource "ansible_group" "our" {
   name = "our"
 }
-# resource "ansible_group" "periphery" {
-# name = "periphery"
-# }
-# resource "ansible_group" "load_balancer" {
-# name = "load_balancer"
-# }
+resource "ansible_group" "load_balancer" {
+  name = "load_balancer"
+}
-# resource "ansible_group" "database" {
-# name = "database"
-# }
+resource "ansible_group" "database" {
+  name = "database"
+}
 resource "ansible_group" "futureporn" {
   name = "futureporn"
   children = [
-    # "load_balancer",
-    # "database",
-    # "capture",
-    # "our-server",
-    # "our-worker",
-    # "periphery",
-    # "tracker",
+    "load_balancer",
+    "database",
+    "capture",
+    "our-server",
+    "our-worker",
     "our"
   ]
 }