add ipfs playbook

parent 40ee0589b7
commit 325fe576e2

.vscode/settings.json (3 changes, vendored)
@@ -8,5 +8,6 @@
 | 
			
		||||
    },
 | 
			
		||||
    "editor.tabSize": 2,
 | 
			
		||||
    "editor.formatOnSave": true,
 | 
			
		||||
    "git.ignoreLimitWarning": true
 | 
			
		||||
    "git.ignoreLimitWarning": true,
 | 
			
		||||
    "ansible.python.interpreterPath": "/home/cj/Documents/futureporn-monorepo/venv/bin/python"
 | 
			
		||||
}
 | 
			
		||||
@@ -2,25 +2,28 @@

Here we have playbooks to help provision infrastructure to do specific tasks.

## Inventory
## Getting started

Terraform handles spinning up Vultr instances. See `../terraform` for that. Also, Terraform is where we get our Ansible inventory, so make sure to run `tofu apply` in `../terraform` before working with Ansible.
### Dependencies

## Available plays
Install the necessary ansible galaxy roles & collections

    ansible-galaxy role install -r requirements.yml
    ansible-galaxy collection install -r requirements.yml

Generate an inventory file, via opentofu.

tofu handles spinning up Vultr instances. See `../terraform` for that. Also, Terraform is where we get our Ansible inventory, so make sure to run `tofu apply` in `../terraform` before working with Ansible.
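
For example, a typical flow might look like this (an illustrative sketch; it assumes the Terraform outputs are wired into the Ansible inventory as described above):

    cd ../terraform && tofu apply
    cd ../ansible && ansible-inventory --graph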

### Run playbook

    ansible-playbook ./site.yml


## Available playbooks

### bootstrap.yml

Prepare the instances for working with Ansible. This sets up SSH and ensures python is installed.


### k3s-ansible

Provision the instances to act as a k3s cluster.

@see https://github.com/k3s-io/k3s-ansible/tree/master

tl;dr: `ansible-playbook k3s.orchestration.site`

### site.yml

Sets up all the instances to perform bright.futureporn.net workloads. Includes setting up Dokku to work with k3s-scheduler.
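
For reference, a couple of illustrative invocations (assuming an `ipfs` group exists in the generated inventory; adjust to whatever groups `tofu apply` produces):

    ansible-playbook ./bootstrap.yml
    ansible-playbook ./site.yml --limit ipfs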
							
								
								
									
ansible/handlers/main.yml (new file, 1 line)
@@ -0,0 +1 @@
 | 
			
		||||
---
 | 
			
		||||
							
								
								
									
ansible/roles/backblaze/defaults/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
backblaze_bucket_name: fp-usc
 | 
			
		||||
backblaze_allowed_origins:
 | 
			
		||||
  - https://futureporn.net
 | 
			
		||||
backblaze_enable_b2_cli: true
 | 
			
		||||
backblaze_enable_cors: false
 | 
			
		||||
backblaze_auth_key_id: "{{ lookup('dotenv', 'BACKBLAZE_AUTH_KEY_ID', file='../../../../.env.production') }}"
 | 
			
		||||
backblaze_auth_application_key: "{{ lookup('dotenv', 'BACKBLAZE_AUTH_APPLICATION_KEY', file='../../../../.env.production') }}"
 | 
			
		||||
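
A minimal sketch of how these defaults might be overridden from a play; the host group and the extra origin are illustrative, not part of this commit:

    - hosts: ipfs
      become: true
      roles:
        - role: backblaze
          vars:
            backblaze_enable_cors: true
            backblaze_allowed_origins:
              - https://futureporn.net
              - https://bright.futureporn.net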
							
								
								
									
ansible/roles/backblaze/tasks/b2-cli.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Download b2-cli
 | 
			
		||||
  ansible.builtin.get_url:
 | 
			
		||||
    url: https://github.com/Backblaze/B2_Command_Line_Tool/releases/download/v4.4.2/b2v4-linux
 | 
			
		||||
    dest: /usr/local/bin/b2
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
 | 
			
		||||
- name: Authenticate b2-cli
 | 
			
		||||
  ansible.builtin.expect:
 | 
			
		||||
    command: /usr/local/bin/b2 account authorize
 | 
			
		||||
    responses:
 | 
			
		||||
      "Backblaze application key ID:":
 | 
			
		||||
        - "{{ backblaze_auth_key_id }}"
 | 
			
		||||
      "Backblaze application key:":
 | 
			
		||||
        - "{{ backblaze_auth_application_key }}"
 | 
			
		||||
    creates: /home/ipfs/.config/b2/account_info
 | 
			
		||||
  become: true
 | 
			
		||||
  become_user: ipfs
 | 
			
		||||
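
Once authorized, the credentials can be sanity-checked with the same CLI command the cors tasks rely on, for example:

    sudo -u ipfs /usr/local/bin/b2 bucket get fp-usc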
							
								
								
									
ansible/roles/backblaze/tasks/cors.yml (new file, 65 lines)
@@ -0,0 +1,65 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
# - name: Get current CORS rules
 | 
			
		||||
#   ansible.builtin.command: b2 bucket get "{{ backblaze_bucket_name }}"
 | 
			
		||||
#   register: b2_get_result
 | 
			
		||||
#   changed_when: false
 | 
			
		||||
 | 
			
		||||
# - name: Extract current CORS rules JSON
 | 
			
		||||
#   ansible.builtin.set_fact:
 | 
			
		||||
#     current_cors: "{{ (b2_get_result.stdout | from_json).corsRules | default([]) }}"
 | 
			
		||||
 | 
			
		||||
# - name: Load desired CORS rules
 | 
			
		||||
#   ansible.builtin.slurp:
 | 
			
		||||
#     src: "{{ cors_file }}"
 | 
			
		||||
#   register: desired_cors_raw
 | 
			
		||||
 | 
			
		||||
# - name: Decode and parse desired CORS rules
 | 
			
		||||
#   ansible.builtin.set_fact:
 | 
			
		||||
#     desired_cors: "{{ desired_cors_raw.content | b64decode | from_json }}"
 | 
			
		||||
 | 
			
		||||
# - name: Compare and set CORS if different
 | 
			
		||||
#   ansible.builtin.command: >
 | 
			
		||||
#     b2 bucket update --cors-rules "{{ desired_cors_raw.content | b64decode }}" "{{ bucket_name }}"
 | 
			
		||||
#   when: current_cors | to_json != desired_cors | to_json
 | 
			
		||||
#   changed_when: true
 | 
			
		||||
 | 
			
		||||
# #
 | 
			
		||||
 | 
			
		||||
- name: Render cors-rules.json from template
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
    src: cors-rules.json.j2
 | 
			
		||||
    dest: /tmp/cors-rules.json
 | 
			
		||||
 | 
			
		||||
- name: Get current CORS rules
 | 
			
		||||
  ansible.builtin.command: b2 bucket get "{{ backblaze_bucket_name }}"
 | 
			
		||||
  register: b2_get_result
 | 
			
		||||
  changed_when: false
 | 
			
		||||
 | 
			
		||||
- name: Extract current CORS rules JSON
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    current_cors: "{{ (b2_get_result.stdout | from_json).corsRules | default([]) }}"
 | 
			
		||||
 | 
			
		||||
- name: Load desired CORS rules
 | 
			
		||||
  ansible.builtin.slurp:
 | 
			
		||||
    src: /tmp/cors-rules.json
 | 
			
		||||
  register: desired_cors_raw
 | 
			
		||||
 | 
			
		||||
- name: Decode and parse desired CORS rules
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    desired_cors: "{{ desired_cors_raw.content | b64decode | from_json }}"
 | 
			
		||||
 | 
			
		||||
- name: Debug desired_cors
 | 
			
		||||
  ansible.builtin.debug:
 | 
			
		||||
    var: desired_cors
 | 
			
		||||
 | 
			
		||||
- name: Debug current_cors
 | 
			
		||||
  ansible.builtin.debug:
 | 
			
		||||
    var: current_cors
 | 
			
		||||
 | 
			
		||||
- name: Compare and set CORS if different
 | 
			
		||||
  ansible.builtin.command: >
 | 
			
		||||
    b2 bucket update --cors-rules "{{ desired_cors_raw.content | b64decode }}" "{{ backblaze_bucket_name }}"
 | 
			
		||||
  when: current_cors | to_json != desired_cors | to_json
 | 
			
		||||
  changed_when: true
 | 
			
		||||
							
								
								
									
ansible/roles/backblaze/tasks/main.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Configure b2-cli
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: b2-cli.yml
 | 
			
		||||
  when: backblaze_enable_b2_cli
 | 
			
		||||
 | 
			
		||||
- name: Configure CORS
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: cors.yml
 | 
			
		||||
  when: backblaze_enable_cors
 | 
			
		||||
							
								
								
									
ansible/roles/backblaze/templates/cors-rules.json.j2 (new file, 14 lines)
@@ -0,0 +1,14 @@
 | 
			
		||||
[
 | 
			
		||||
  {
 | 
			
		||||
    "allowedHeaders": ["*"],
 | 
			
		||||
    "allowedOperations": ["s3_head", "s3_put", "s3_get"],
 | 
			
		||||
    "allowedOrigins": [
 | 
			
		||||
      {% for origin in backblaze_allowed_origins %}
 | 
			
		||||
        "{{ origin }}"{% if not loop.last %},{% endif %}
 | 
			
		||||
      {% endfor %}
 | 
			
		||||
    ],
 | 
			
		||||
    "corsRuleName": "allowUploads",
 | 
			
		||||
    "exposeHeaders": ["etag"],
 | 
			
		||||
    "maxAgeSeconds": 3600
 | 
			
		||||
  }
 | 
			
		||||
]
 | 
			
		||||
@@ -1,7 +1,7 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
## @warning Do not edit this file! It's probably right, and you're probably doing something wrong! 
 | 
			
		||||
##          If you're seeing interactive host auth checks prompts while running ansible-playbook, 
 | 
			
		||||
## @warning Do not edit this file! It's probably right, and you're probably doing something wrong!
 | 
			
		||||
##          If you're seeing interactive host auth checks prompts while running ansible-playbook,
 | 
			
		||||
##          you're probably skipping over this role! Don't skip it!
 | 
			
		||||
 | 
			
		||||
## @see https://gist.github.com/shirou/6928012
 | 
			
		||||
@@ -9,21 +9,22 @@
 | 
			
		||||
- name: Scan for SSH host keys.
 | 
			
		||||
  delegate_to: localhost
 | 
			
		||||
  ansible.builtin.shell:
 | 
			
		||||
    cmd: ssh-keyscan -q -p 22 {{ ansible_host }} 2>/dev/null
 | 
			
		||||
    cmd: ssh-keyscan -p 22 {{ ansible_host }} 2>/dev/null
 | 
			
		||||
  changed_when: false
 | 
			
		||||
  register: ssh_scan
 | 
			
		||||
  register: bootstrap_ssh_scan
 | 
			
		||||
  retries: 2 # it always fails the first time
 | 
			
		||||
  until: ssh_scan.rc == 0
 | 
			
		||||
  until: bootstrap_ssh_scan.rc == 0
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- debug:
 | 
			
		||||
    var: ssh_scan
 | 
			
		||||
- name: Debug bootstrap_ssh_scan variable
 | 
			
		||||
  ansible.builtin.debug:
 | 
			
		||||
    var: bootstrap_ssh_scan
 | 
			
		||||
 | 
			
		||||
- name: Update known_hosts.
 | 
			
		||||
  ansible.builtin.known_hosts:
 | 
			
		||||
    key: "{{ item }}"
 | 
			
		||||
    name: "{{ ansible_host }}"
 | 
			
		||||
  with_items: "{{ ssh_scan.stdout_lines }}"
 | 
			
		||||
  with_items: "{{ bootstrap_ssh_scan.stdout_lines }}"
 | 
			
		||||
  delegate_to: localhost
 | 
			
		||||
 | 
			
		||||
- name: Install python3
 | 
			
		||||
 | 
			
		||||
@@ -11,7 +11,7 @@
 | 
			
		||||
      - ncdu
 | 
			
		||||
      - pipx
 | 
			
		||||
      - fd-find
 | 
			
		||||
    update_cache: yes
 | 
			
		||||
    update_cache: true
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Download Docker installer
 | 
			
		||||
 | 
			
		||||
							
								
								
									
ansible/roles/coolify/defaults/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
coolify_dir: /opt/coolify
 | 
			
		||||
							
								
								
									
ansible/roles/coolify/tasks/main.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Install Coolify using official installer
 | 
			
		||||
  ansible.builtin.shell: |
 | 
			
		||||
    curl -fsSL https://cdn.coollabs.io/coolify/install.sh | sudo bash
 | 
			
		||||
  args:
 | 
			
		||||
    creates: /coolify/docker-compose.yml  # adjust if needed to prevent reruns
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# @note securely connect to coolify webui using SSH tunneling.
 | 
			
		||||
# ssh -L 8000:localhost:8000 root@our
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# @see https://coolify.io/docs/knowledge-base/server/firewall
 | 
			
		||||
- name: Allow UFW ports
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    rule: allow
 | 
			
		||||
    port: "{{ item }}"
 | 
			
		||||
    proto: tcp
 | 
			
		||||
  loop:
 | 
			
		||||
    - 8000 # coolify UI
 | 
			
		||||
    - 6001 # real-time comms
 | 
			
		||||
    - 6002 # terminal
 | 
			
		||||
    - 80
 | 
			
		||||
    - 443
 | 
			
		||||
							
								
								
									
ansible/roles/docker/tasks/docker.yml (new file, 52 lines)
@@ -0,0 +1,52 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Ensure prerequisites are installed
 | 
			
		||||
  become: true
 | 
			
		||||
  ansible.builtin.apt:
 | 
			
		||||
    name:
 | 
			
		||||
      - ca-certificates
 | 
			
		||||
      - curl
 | 
			
		||||
    state: present
 | 
			
		||||
    update_cache: true
 | 
			
		||||
 | 
			
		||||
- name: Ensure /etc/apt/keyrings directory exists
 | 
			
		||||
  become: true
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    path: /etc/apt/keyrings
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
 | 
			
		||||
- name: Download Docker GPG key
 | 
			
		||||
  become: true
 | 
			
		||||
  ansible.builtin.get_url:
 | 
			
		||||
    url: https://download.docker.com/linux/ubuntu/gpg
 | 
			
		||||
    dest: /etc/apt/keyrings/docker.asc
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
 | 
			
		||||
- name: Add Docker APT repository
 | 
			
		||||
  become: true
 | 
			
		||||
  ansible.builtin.apt_repository:
 | 
			
		||||
    repo: "deb [arch={{ ansible_architecture }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_lsb.codename }} stable"
 | 
			
		||||
    state: present
 | 
			
		||||
    filename: docker
 | 
			
		||||
    update_cache: true
 | 
			
		||||
 | 
			
		||||
- name: Install docker
 | 
			
		||||
  become: true
 | 
			
		||||
  ansible.builtin.apt:
 | 
			
		||||
    name:
 | 
			
		||||
      - docker-ce
 | 
			
		||||
      - docker-ce-cli
 | 
			
		||||
      - containerd.io
 | 
			
		||||
      - docker-buildx-plugin
 | 
			
		||||
      - docker-compose-plugin
 | 
			
		||||
    state: present
 | 
			
		||||
    update_cache: true
 | 
			
		||||
 | 
			
		||||
- name: Install docker ansible module dependencies
 | 
			
		||||
  ansible.builtin.pip:
 | 
			
		||||
    name: "{{ item }}"
 | 
			
		||||
    state: present
 | 
			
		||||
  loop:
 | 
			
		||||
    - jsondiff
 | 
			
		||||
    - pyyaml
 | 
			
		||||
    - docker
 | 
			
		||||
							
								
								
									
ansible/roles/docker/tasks/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Install docker
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: docker.yml
 | 
			
		||||
							
								
								
									
ansible/roles/infisical/defaults/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
 | 
			
		||||
---
 | 
			
		||||
infisical_caddy_image: caddy:2
 | 
			
		||||
infisical_docker_tag: latest-postgres
 | 
			
		||||
							
								
								
									
ansible/roles/infisical/handlers/main.yml (new file, 13 lines)
@@ -0,0 +1,13 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Restart caddy
 | 
			
		||||
  community.docker.docker_container:
 | 
			
		||||
    name: infisical-caddy-1
 | 
			
		||||
    image: "{{ infisical_caddy_image }}"
 | 
			
		||||
    state: started
 | 
			
		||||
    restart: true
 | 
			
		||||
 | 
			
		||||
- name: Restart infisical
 | 
			
		||||
  community.docker.docker_compose_v2:
 | 
			
		||||
    project_src: /opt/infisical
 | 
			
		||||
    state: restarted
 | 
			
		||||
							
								
								
									
ansible/roles/infisical/tasks/main.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Ensure infisical directory exists
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    path: /opt/infisical
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
 | 
			
		||||
- name: Generate .env file
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: env.j2
 | 
			
		||||
    dest: /opt/infisical/.env
 | 
			
		||||
    mode: "0600"
 | 
			
		||||
 | 
			
		||||
- name: Install passlib
 | 
			
		||||
  ansible.builtin.pip:
 | 
			
		||||
    name: passlib # dependency of Ansible's passwordhash
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Template Caddyfile
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: Caddyfile.j2
 | 
			
		||||
    dest: /opt/infisical/Caddyfile
 | 
			
		||||
    mode: "0600"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart caddy
 | 
			
		||||
 | 
			
		||||
- name: Template Docker Compose file
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: docker-compose.yml.j2
 | 
			
		||||
    dest: /opt/infisical/docker-compose.yml
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
 | 
			
		||||
- name: Start up docker-compose.yml
 | 
			
		||||
  community.docker.docker_compose_v2:
 | 
			
		||||
    project_src: /opt/infisical
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Configure firewall
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    rule: allow
 | 
			
		||||
    port: "{{ item }}"
 | 
			
		||||
    proto: tcp
 | 
			
		||||
  loop:
 | 
			
		||||
    - 443
 | 
			
		||||
							
								
								
									
ansible/roles/infisical/templates/Caddyfile.j2 (new file, 13 lines)
@@ -0,0 +1,13 @@
 | 
			
		||||
 | 
			
		||||
infisical.futureporn.net {
 | 
			
		||||
  # basic_auth {
 | 
			
		||||
	# 	{{ lookup('dotenv', 'INFISICAL_BASIC_AUTH_USERNAME', file='../../../../.env')}} {{ lookup('dotenv', 'INFISICAL_BASIC_AUTH_PASSWORD', file='../../../../.env') | password_hash('bcrypt') }}
 | 
			
		||||
	# }
 | 
			
		||||
 | 
			
		||||
  reverse_proxy infisical-backend:8080 {
 | 
			
		||||
    health_uri /
 | 
			
		||||
		health_interval 10s
 | 
			
		||||
		health_timeout 5s
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
							
								
								
									
ansible/roles/infisical/templates/docker-compose.yml.j2 (new file, 86 lines)
@@ -0,0 +1,86 @@
 | 
			
		||||
x-logging: &default-logging
 | 
			
		||||
  driver: "json-file"
 | 
			
		||||
  options:
 | 
			
		||||
    max-size: "${LOG_MAX_SIZE:-20m}"
 | 
			
		||||
    max-file: "${LOG_MAX_FILE:-10}"
 | 
			
		||||
    compress: "true"
 | 
			
		||||
 | 
			
		||||
services:  
 | 
			
		||||
  caddy:
 | 
			
		||||
    image: {{ infisical_caddy_image }}
 | 
			
		||||
    restart: unless-stopped
 | 
			
		||||
    volumes:
 | 
			
		||||
      - ./Caddyfile:/etc/caddy/Caddyfile
 | 
			
		||||
      - caddy_data:/data
 | 
			
		||||
    ports:
 | 
			
		||||
      - 443:443
 | 
			
		||||
    environment:
 | 
			
		||||
      - BASE_URL=infisical.futureporn.net
 | 
			
		||||
    logging: *default-logging
 | 
			
		||||
    networks:
 | 
			
		||||
      - infisical
 | 
			
		||||
 | 
			
		||||
  backend:  
 | 
			
		||||
    container_name: infisical-backend
 | 
			
		||||
    image: infisical/infisical:{{ infisical_docker_tag }}
 | 
			
		||||
    restart: unless-stopped  
 | 
			
		||||
    depends_on:  
 | 
			
		||||
      db:  
 | 
			
		||||
        condition: service_healthy  
 | 
			
		||||
      redis:  
 | 
			
		||||
        condition: service_started  
 | 
			
		||||
      db-migration:  
 | 
			
		||||
        condition: service_completed_successfully  
 | 
			
		||||
    pull_policy: always  
 | 
			
		||||
    env_file: .env  
 | 
			
		||||
    environment:  
 | 
			
		||||
      - NODE_ENV=production  
 | 
			
		||||
    ports:  
 | 
			
		||||
      - 80:8080  
 | 
			
		||||
    networks:  
 | 
			
		||||
      - infisical
 | 
			
		||||
 | 
			
		||||
  redis:  
 | 
			
		||||
    container_name: infisical-redis
 | 
			
		||||
    image: redis
 | 
			
		||||
    restart: unless-stopped  
 | 
			
		||||
    env_file: .env  
 | 
			
		||||
    environment:  
 | 
			
		||||
      - ALLOW_EMPTY_PASSWORD=yes  
 | 
			
		||||
    volumes:  
 | 
			
		||||
      - ./volumes/redis:/data  
 | 
			
		||||
    networks:  
 | 
			
		||||
      - infisical
 | 
			
		||||
 | 
			
		||||
  db:  
 | 
			
		||||
    container_name: infisical-db
 | 
			
		||||
    image: postgres:14-alpine  
 | 
			
		||||
    restart: unless-stopped  
 | 
			
		||||
    env_file: .env
 | 
			
		||||
    volumes:
 | 
			
		||||
      - ./volumes/postgres:/var/lib/postgresql/data  
 | 
			
		||||
    networks:  
 | 
			
		||||
      - infisical  
 | 
			
		||||
    healthcheck:  
 | 
			
		||||
      test: "pg_isready --username=${POSTGRES_USER} && psql --username=${POSTGRES_USER} --list"  
 | 
			
		||||
      interval: 5s  
 | 
			
		||||
      timeout: 10s  
 | 
			
		||||
      retries: 10 
 | 
			
		||||
 | 
			
		||||
  db-migration:  
 | 
			
		||||
    container_name: infisical-db-migration  
 | 
			
		||||
    depends_on:  
 | 
			
		||||
      db:  
 | 
			
		||||
        condition: service_healthy  
 | 
			
		||||
    image: infisical/infisical:{{ infisical_docker_tag }}
 | 
			
		||||
    env_file: .env  
 | 
			
		||||
    command: npm run migration:latest
 | 
			
		||||
    pull_policy: always  
 | 
			
		||||
    networks:  
 | 
			
		||||
      - infisical
 | 
			
		||||
 | 
			
		||||
networks:  
 | 
			
		||||
  infisical:
 | 
			
		||||
 | 
			
		||||
volumes:
 | 
			
		||||
  caddy_data: null
 | 
			
		||||
							
								
								
									
ansible/roles/infisical/templates/env.j2 (new file, 23 lines)
@@ -0,0 +1,23 @@
 | 
			
		||||
# Website URL  
 | 
			
		||||
SITE_URL=https://infisical.futureporn.net
 | 
			
		||||
 | 
			
		||||
# Keys  
 | 
			
		||||
# Required key for platform encryption/decryption ops  
 | 
			
		||||
# GENERATE YOUR OWN KEY WITH `openssl rand -hex 16`  
 | 
			
		||||
ENCRYPTION_KEY={{ lookup('dotenv', 'INFISICAL_ENCRYPTION_KEY', file='../../../../.env') }}
 | 
			
		||||
 | 
			
		||||
# JWT
 | 
			
		||||
# Required secrets to sign JWT tokens  
 | 
			
		||||
# GENERATE YOUR OWN KEY WITH `openssl rand -base64 32`  
 | 
			
		||||
AUTH_SECRET={{ lookup('dotenv', 'INFISICAL_AUTH_SECRET', file='../../../../.env') }}
 | 
			
		||||
 | 
			
		||||
# Postgres  
 | 
			
		||||
POSTGRES_PASSWORD={{ lookup('dotenv', 'INFISICAL_POSTGRES_PASSWORD', file='../../../../.env') }}
 | 
			
		||||
POSTGRES_USER=infisical  
 | 
			
		||||
POSTGRES_DB=infisical  
 | 
			
		||||
# Do not change the next line  
 | 
			
		||||
DB_CONNECTION_URI=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
 | 
			
		||||
 | 
			
		||||
# Redis  
 | 
			
		||||
# Do not change the next line  
 | 
			
		||||
REDIS_URL=redis://redis:6379
 | 
			
		||||
@@ -1,3 +1,11 @@
 | 
			
		||||
---
 | 
			
		||||
ipfs_kubo_version: v0.34.1
 | 
			
		||||
ipfs_cluster_follow_version: v1.1.2
 | 
			
		||||
ipfs_kubo_version: v0.38.0
 | 
			
		||||
ipfs_migrations_version: v2.0.2
 | 
			
		||||
ipfs_cluster_follow_version: v1.1.4
 | 
			
		||||
ipfs_cluster_service_version: v1.1.4
 | 
			
		||||
ipfs_storage_max: 5TB
 | 
			
		||||
ipfs_path: /mnt/blockstorage/ipfs
 | 
			
		||||
ipfs_enable_blockstorage: false
 | 
			
		||||
ipfs_enable_kubo: true
 | 
			
		||||
ipfs_enable_ipfs_cluster_service: false
 | 
			
		||||
ipfs_enable_ipfs_cluster_follow: false
 | 
			
		||||
 | 
			
		||||
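
These `ipfs_enable_*` toggles gate which task files `tasks/main.yml` includes (see that file further down). One way a play might flip them per host group (illustrative values):

    - hosts: ipfs
      become: true
      roles:
        - role: ipfs
          vars:
            ipfs_enable_blockstorage: true
            ipfs_enable_ipfs_cluster_follow: true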
							
								
								
									
ansible/roles/ipfs/files/pinall.sh (new file, 72 lines)
@@ -0,0 +1,72 @@
 | 
			
		||||
#!/bin/bash
 | 
			
		||||
set -euo pipefail
 | 
			
		||||
 | 
			
		||||
############################################################
 | 
			
		||||
# Script: pinall.sh
 | 
			
		||||
#
 | 
			
		||||
# Description:
 | 
			
		||||
#   This script reads a list of S3 object keys (filenames)
 | 
			
		||||
#   from a file, downloads each file from Backblaze B2,
 | 
			
		||||
#   adds it to a local IPFS node, and optionally cleans up
 | 
			
		||||
#   the temporary downloaded file to save disk space.
 | 
			
		||||
#
 | 
			
		||||
# Usage:
 | 
			
		||||
#   ./pinall.sh <file-with-s3-keys>
 | 
			
		||||
#   Example:
 | 
			
		||||
#     # sudo -u ipfs env IPFS_PATH=/mnt/blockstorage/ipfs pinall.sh /home/ipfs/filenames.txt
 | 
			
		||||
#
 | 
			
		||||
#   - files.txt should contain one S3 key per line.
 | 
			
		||||
#   - Lines starting with '#' or empty lines are ignored.
 | 
			
		||||
#
 | 
			
		||||
# Environment:
 | 
			
		||||
#   - Requires `b2` CLI configured with B2 credentials.
 | 
			
		||||
#   - Requires an IPFS node installed and accessible at
 | 
			
		||||
#     $IPFS_PATH (set in script).
 | 
			
		||||
#
 | 
			
		||||
# Behavior:
 | 
			
		||||
#   1. Reads each key from the input file.
 | 
			
		||||
#   2. Downloads the file from B2 to /tmp/<key>.
 | 
			
		||||
#   3. Adds the downloaded file to IPFS (CID version 1).
 | 
			
		||||
#   4. Deletes the temporary file after adding to IPFS.
 | 
			
		||||
#   5. Logs progress with timestamps to stdout.
 | 
			
		||||
#
 | 
			
		||||
# Exit Codes:
 | 
			
		||||
#   - 0: All files processed successfully.
 | 
			
		||||
#   - 1: Incorrect usage or missing input file.
 | 
			
		||||
#
 | 
			
		||||
############################################################
 | 
			
		||||
 | 
			
		||||
# Usage check
 | 
			
		||||
if [ $# -ne 1 ]; then
 | 
			
		||||
  echo "Usage: $0 <file-with-s3-keys>"
 | 
			
		||||
  exit 1
 | 
			
		||||
fi
 | 
			
		||||
 | 
			
		||||
ipfs id
 | 
			
		||||
echo "Using IPFS_PATH=$IPFS_PATH"
 | 
			
		||||
 | 
			
		||||
FILELIST=$1
 | 
			
		||||
 | 
			
		||||
while IFS= read -r KEY; do
 | 
			
		||||
  [[ -z "$KEY" || "$KEY" =~ ^# ]] && continue
 | 
			
		||||
 | 
			
		||||
  echo "[$(date +"%Y-%m-%d %H:%M:%S")] Downloading $KEY from B2..."
 | 
			
		||||
  TMPFILE="/tmp/$KEY"
 | 
			
		||||
 | 
			
		||||
  if b2 file download "b2://futureporn/$KEY" "$TMPFILE"; then
 | 
			
		||||
    echo "[$(date +"%Y-%m-%d %H:%M:%S")] Download complete: $KEY"
 | 
			
		||||
  else
 | 
			
		||||
    echo "[$(date +"%Y-%m-%d %H:%M:%S")] Download failed: $KEY"
 | 
			
		||||
    rm -f "$TMPFILE"
 | 
			
		||||
    continue
 | 
			
		||||
  fi
 | 
			
		||||
 | 
			
		||||
  echo "[$(date +"%Y-%m-%d %H:%M:%S")] Adding $KEY to IPFS..."
 | 
			
		||||
  ipfs add --cid-version=1 "$TMPFILE"
 | 
			
		||||
 | 
			
		||||
  # optional cleanup to save space
 | 
			
		||||
  rm -f "$TMPFILE"
 | 
			
		||||
 | 
			
		||||
done < "$FILELIST"
 | 
			
		||||
 | 
			
		||||
echo "[$(date +"%Y-%m-%d %H:%M:%S")] All tasks complete."
 | 
			
		||||
@@ -14,3 +14,17 @@
 | 
			
		||||
    enabled: true
 | 
			
		||||
    daemon_reload: true
 | 
			
		||||
 | 
			
		||||
- name: Migrate ipfs
 | 
			
		||||
  ansible.builtin.shell:
 | 
			
		||||
    creates: "{{ ipfs_path }}/version"
 | 
			
		||||
    cmd: "env IPFS_PATH={{ ipfs_path }} /usr/local/bin/fs-repo-migrations"  # noqa command-instead-of-shell
 | 
			
		||||
  become: true
 | 
			
		||||
  become_user: ipfs
 | 
			
		||||
 | 
			
		||||
- name: Initialize ipfs
 | 
			
		||||
  ansible.builtin.shell:
 | 
			
		||||
    creates: "{{ ipfs_path }}/datastore_spec"
 | 
			
		||||
    cmd: "env IPFS_PATH={{ ipfs_path }} /usr/local/bin/ipfs init --profile pebbleds"  # noqa command-instead-of-shell
 | 
			
		||||
  become: true
 | 
			
		||||
  become_user: ipfs
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
							
								
								
									
ansible/roles/ipfs/tasks/blockstorage.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Create gpt table on /dev/vdb
 | 
			
		||||
  community.general.parted:
 | 
			
		||||
    device: /dev/vdb
 | 
			
		||||
    number: 1
 | 
			
		||||
    part_type: primary
 | 
			
		||||
    label: gpt
 | 
			
		||||
    fs_type: ext4
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Create an ext4 filesystem on /dev/vdb1
 | 
			
		||||
  community.general.filesystem:
 | 
			
		||||
    fstype: ext4
 | 
			
		||||
    dev: /dev/vdb1
 | 
			
		||||
 | 
			
		||||
- name: Get UUID of /dev/vdb1
 | 
			
		||||
  ansible.builtin.command:
 | 
			
		||||
    cmd: blkid -s UUID -o value /dev/vdb1
 | 
			
		||||
  register: ipfs_vdb1_uuid
 | 
			
		||||
  changed_when: false
 | 
			
		||||
 | 
			
		||||
- name: Mount /dev/vdb1 to /mnt/blockstorage
 | 
			
		||||
  ansible.posix.mount:
 | 
			
		||||
    path: /mnt/blockstorage
 | 
			
		||||
    src: /dev/disk/by-uuid/{{ ipfs_vdb1_uuid.stdout }}
 | 
			
		||||
    fstype: ext4
 | 
			
		||||
    opts: defaults,noatime,nofail
 | 
			
		||||
    state: mounted
 | 
			
		||||
							
								
								
									
ansible/roles/ipfs/tasks/config.yml (new file, 56 lines)
@@ -0,0 +1,56 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Load IPFS config from remote
 | 
			
		||||
  ansible.builtin.slurp:
 | 
			
		||||
    src: "{{ ipfs_path }}/config"
 | 
			
		||||
  register: ipfs_config_raw
 | 
			
		||||
 | 
			
		||||
- name: Parse IPFS config JSON
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    ipfs_config: "{{ ipfs_config_raw.content | b64decode | from_json }}"
 | 
			
		||||
 | 
			
		||||
# - name: Debug IPFS config
 | 
			
		||||
#   ansible.builtin.debug:
 | 
			
		||||
#     var: ipfs_config
 | 
			
		||||
 | 
			
		||||
- name: Show storagemax variable
 | 
			
		||||
  ansible.builtin.debug:
 | 
			
		||||
    msg: " here is what we ahvea configured for ipfs_storage_max:{{ ipfs_storage_max }}"
 | 
			
		||||
 | 
			
		||||
- name: Show storagemax before
 | 
			
		||||
  ansible.builtin.debug:
 | 
			
		||||
    msg: "here is the storagemax before: {{ ipfs_config.Datastore.StorageMax }}"
 | 
			
		||||
 | 
			
		||||
# noqa command-instead-of-shell
 | 
			
		||||
- name: Configure IPFS Provide.DHT.SweepEnabled
 | 
			
		||||
  ansible.builtin.shell:
 | 
			
		||||
    cmd: "env IPFS_PATH={{ ipfs_path }} /usr/local/bin/ipfs config --json Provide.DHT.SweepEnabled true"
 | 
			
		||||
  when: ipfs_config.Provide.DHT.SweepEnabled is not defined or not ipfs_config.Provide.DHT.SweepEnabled
 | 
			
		||||
  changed_when: true # explicitly mark it as a change when it runs
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
  become: true
 | 
			
		||||
  become_user: ipfs
 | 
			
		||||
 | 
			
		||||
# noqa command-instead-of-shell
 | 
			
		||||
- name: Configure IPFS Datastore.StorageMax
 | 
			
		||||
  ansible.builtin.shell:
 | 
			
		||||
    cmd: "env IPFS_PATH={{ ipfs_path }} /usr/local/bin/ipfs config Datastore.StorageMax {{ ipfs_storage_max }}"
 | 
			
		||||
  when: ipfs_config.Datastore.StorageMax is not defined or ipfs_config.Datastore.StorageMax != ipfs_storage_max
 | 
			
		||||
  changed_when: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
  become: true
 | 
			
		||||
  become_user: ipfs
 | 
			
		||||
 | 
			
		||||
- name: Load IPFS config from remote
 | 
			
		||||
  ansible.builtin.slurp:
 | 
			
		||||
    src: "{{ ipfs_path }}/config"
 | 
			
		||||
  register: ipfs_config_raw_after
 | 
			
		||||
 | 
			
		||||
- name: Parse IPFS config JSON after making changes
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    ipfs_config_after: "{{ ipfs_config_raw_after.content | b64decode | from_json }}"
 | 
			
		||||
 | 
			
		||||
- name: Show storagemax after
 | 
			
		||||
  ansible.builtin.debug:
 | 
			
		||||
    msg: "here is the storagemax after: {{ ipfs_config_after.Datastore.StorageMax }}"
 | 
			
		||||
							
								
								
									
ansible/roles/ipfs/tasks/fs-repo-migrations.yml (new file, 13 lines)
@@ -0,0 +1,13 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Download and extract IPFS fs-repo-migrations
 | 
			
		||||
  ansible.builtin.unarchive:
 | 
			
		||||
    src: "https://dist.ipfs.tech/fs-repo-migrations/{{ ipfs_migrations_version }}/fs-repo-migrations_{{ ipfs_migrations_version }}_linux-amd64.tar.gz"
 | 
			
		||||
    dest: /tmp
 | 
			
		||||
    remote_src: true
 | 
			
		||||
 | 
			
		||||
- name: Install IPFS fs-repo-migrations
 | 
			
		||||
  ansible.builtin.copy:
 | 
			
		||||
    src: /tmp/fs-repo-migrations/fs-repo-migrations
 | 
			
		||||
    dest: /usr/local/bin/fs-repo-migrations
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
    remote_src: true
 | 
			
		||||
							
								
								
									
ansible/roles/ipfs/tasks/helpers.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Create pinall.sh
 | 
			
		||||
  ansible.builtin.copy:
 | 
			
		||||
    src: pinall.sh
 | 
			
		||||
    dest: /usr/local/bin/pinall.sh
 | 
			
		||||
    remote_src: false
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
							
								
								
									
ansible/roles/ipfs/tasks/ipfs-cluster-follow.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Download and extract ipfs-cluster-follow
 | 
			
		||||
  ansible.builtin.unarchive:
 | 
			
		||||
    src: "https://dist.ipfs.tech/ipfs-cluster-follow/{{ ipfs_cluster_follow_version }}/ipfs-cluster-follow_{{ ipfs_cluster_follow_version }}_linux-amd64.tar.gz"
 | 
			
		||||
    dest: /tmp
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs-cluster-follow
 | 
			
		||||
 | 
			
		||||
- name: Install ipfs-cluster-follow
 | 
			
		||||
  ansible.builtin.copy:
 | 
			
		||||
    src: /tmp/ipfs-cluster-follow/ipfs-cluster-follow
 | 
			
		||||
    dest: /usr/local/bin/ipfs-cluster-follow
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs-cluster-follow
 | 
			
		||||
 | 
			
		||||
- name: Generate random peername
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    ipfs_cluster_peername: "{{ lookup('password', '/dev/null length=8 chars=hexdigits') }}"
 | 
			
		||||
 | 
			
		||||
- name: Create ipfs-cluster-follow service
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: ipfs-cluster-follow.service.j2
 | 
			
		||||
    dest: /etc/systemd/system/ipfs-cluster-follow.service
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs-cluster-follow
 | 
			
		||||
							
								
								
									
ansible/roles/ipfs/tasks/ipfs-cluster-service.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Download and extract ipfs-cluster-service
 | 
			
		||||
  ansible.builtin.unarchive:
 | 
			
		||||
    src: "https://dist.ipfs.tech/ipfs-cluster-service/{{ ipfs_cluster_service_version }}/ipfs-cluster-service_{{ ipfs_cluster_service_version }}_linux-amd64.tar.gz"
 | 
			
		||||
    dest: /tmp
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs-cluster-service
 | 
			
		||||
 | 
			
		||||
- name: Install ipfs-cluster-service
 | 
			
		||||
  ansible.builtin.copy:
 | 
			
		||||
    src: /tmp/ipfs-cluster-service/ipfs-cluster-service
 | 
			
		||||
    dest: /usr/local/bin/ipfs-cluster-service
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs-cluster-service
 | 
			
		||||
 | 
			
		||||
- name: Create ipfs-cluster-service service
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: ipfs-cluster-service.service.j2
 | 
			
		||||
    dest: /etc/systemd/system/ipfs-cluster-service.service
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs-cluster-service
 | 
			
		||||
							
								
								
									
ansible/roles/ipfs/tasks/kubo.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Download and extract IPFS Kubo
 | 
			
		||||
  ansible.builtin.unarchive:
 | 
			
		||||
    src: "https://dist.ipfs.tech/kubo/{{ ipfs_kubo_version }}/kubo_{{ ipfs_kubo_version }}_linux-amd64.tar.gz"
 | 
			
		||||
    dest: /tmp
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Initialize ipfs
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
 | 
			
		||||
- name: Install IPFS Kubo
 | 
			
		||||
  ansible.builtin.copy:
 | 
			
		||||
    src: /tmp/kubo/ipfs
 | 
			
		||||
    dest: /usr/local/bin/ipfs
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Initialize ipfs
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
 | 
			
		||||
- name: Create IPFS directory
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    dest: "{{ ipfs_path }}"
 | 
			
		||||
    owner: ipfs
 | 
			
		||||
    group: ipfs
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Initialize ipfs
 | 
			
		||||
    - Configure ipfs
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
 | 
			
		||||
- name: Create IPFS service
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: ipfs.service.j2
 | 
			
		||||
    dest: /etc/systemd/system/ipfs.service
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Initialize ipfs
 | 
			
		||||
    - Configure ipfs
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
 | 
			
		||||
- name: Install fs-repo-migrations
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: fs-repo-migrations.yml
 | 
			
		||||
@@ -1,175 +1,34 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Configure firewall (UDP & TCP)
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    rule: allow
 | 
			
		||||
    port: "{{ item }}"
 | 
			
		||||
    proto: any
 | 
			
		||||
  loop:
 | 
			
		||||
    - 4001
 | 
			
		||||
    - 24007
 | 
			
		||||
    - 24008
 | 
			
		||||
 | 
			
		||||
- name: Configure firewall (TCP)
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    rule: allow
 | 
			
		||||
    port: "{{ item }}"
 | 
			
		||||
    proto: tcp
 | 
			
		||||
  loop:
 | 
			
		||||
    - 29152:65535
 | 
			
		||||
- name: Setup Firewall and system misc
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: system.yml
 | 
			
		||||
 | 
			
		||||
- name: Configure Blockstorage
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: blockstorage.yml
 | 
			
		||||
  when: ipfs_enable_blockstorage
 | 
			
		||||
 | 
			
		||||
- name: Install glusterfs
 | 
			
		||||
  ansible.builtin.apt:
 | 
			
		||||
    name:
 | 
			
		||||
      - glusterfs-server
 | 
			
		||||
    state: present
 | 
			
		||||
- name: Configure kubo
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: kubo.yml
 | 
			
		||||
  when: ipfs_enable_kubo
 | 
			
		||||
 | 
			
		||||
- name: Start & enable glusterd service
 | 
			
		||||
  ansible.builtin.systemd_service:
 | 
			
		||||
    name: glusterd.service
 | 
			
		||||
    state: started
 | 
			
		||||
    enabled: true
 | 
			
		||||
- name: Configure ipfs-cluster-follow
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: ipfs-cluster-follow.yml
 | 
			
		||||
  when: ipfs_enable_ipfs_cluster_follow
 | 
			
		||||
 | 
			
		||||
# - name: Create gluster volume
 | 
			
		||||
#   gluster.gluster.gluster_volume:
 | 
			
		||||
#     state: present
 | 
			
		||||
#     name: ipfs-datastore
 | 
			
		||||
#     bricks: /bricks/brick1/g1
 | 
			
		||||
#     rebalance: true
 | 
			
		||||
#     cluster: "{{ groups['ipfs'] }}"
 | 
			
		||||
#   run_once: true
 | 
			
		||||
- name: Configure ipfs-cluster-service
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: ipfs-cluster-service.yml
 | 
			
		||||
  when: ipfs_enable_ipfs_cluster_service
 | 
			
		||||
 | 
			
		||||
- name: Configure helpers
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: helpers.yml
 | 
			
		||||
 | 
			
		||||
# - name: Start gluster volume
 | 
			
		||||
#   gluster.gluster.gluster_volume:
 | 
			
		||||
#     state: started
 | 
			
		||||
#     name: ipfs-datastore
 | 
			
		||||
 | 
			
		||||
# - name: Limit volume usage
 | 
			
		||||
#   gluster.gluster.gluster_volume:
 | 
			
		||||
#     state: present
 | 
			
		||||
#     name: ipfs-datastore
 | 
			
		||||
#     directory: /
 | 
			
		||||
#     quota: 6.0TB
 | 
			
		||||
 | 
			
		||||
## Example: mount -t glusterfs fp-bright-0:/gv0 /mountme
 | 
			
		||||
# - name: Mount gluster volume
 | 
			
		||||
#   ansible.posix.mount:
 | 
			
		||||
#     src: "{{ ansible_hostname }}:/g1"
 | 
			
		||||
#     path: /mnt/g1
 | 
			
		||||
#     fstype: glusterfs
 | 
			
		||||
#     state: mounted
 | 
			
		||||
 | 
			
		||||
- name: Create ipfs group
 | 
			
		||||
  ansible.builtin.group:
 | 
			
		||||
    name: ipfs
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Create ipfs user
 | 
			
		||||
  ansible.builtin.user:
 | 
			
		||||
    name: ipfs
 | 
			
		||||
    group: ipfs
 | 
			
		||||
    create_home: true
 | 
			
		||||
    home: /home/ipfs
 | 
			
		||||
    system: true
 | 
			
		||||
 | 
			
		||||
- name: Download and extract IPFS Kubo
 | 
			
		||||
  ansible.builtin.unarchive:
 | 
			
		||||
    src: "https://dist.ipfs.tech/kubo/{{ ipfs_kubo_version }}/kubo_{{ ipfs_kubo_version }}_linux-amd64.tar.gz"
 | 
			
		||||
    dest: /tmp
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
 | 
			
		||||
- name: Install IPFS Kubo
 | 
			
		||||
  ansible.builtin.copy:
 | 
			
		||||
    src: /tmp/kubo/ipfs
 | 
			
		||||
    dest: /usr/local/bin/ipfs
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
 | 
			
		||||
- name: Download and extract ipfs-cluster-follow
 | 
			
		||||
  ansible.builtin.unarchive:
 | 
			
		||||
    src: "https://dist.ipfs.tech/ipfs-cluster-follow/{{ ipfs_cluster_follow_version }}/ipfs-cluster-follow_{{ ipfs_cluster_follow_version }}_linux-amd64.tar.gz"
 | 
			
		||||
    dest: /tmp
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs-cluster-follow
 | 
			
		||||
 | 
			
		||||
- name: Install ipfs-cluster-follow
 | 
			
		||||
  ansible.builtin.copy:
 | 
			
		||||
    src: /tmp/ipfs-cluster-follow/ipfs-cluster-follow
 | 
			
		||||
    dest: /usr/local/bin/ipfs-cluster-follow
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
    remote_src: true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs-cluster-follow
 | 
			
		||||
 | 
			
		||||
- name: Generate random peername
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    cluster_peername: "{{ lookup('password', '/dev/null length=8 chars=hexdigits') }}"
 | 
			
		||||
 | 
			
		||||
- name: Create ipfs-cluster-follow service
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: ipfs-cluster-follow.service.j2
 | 
			
		||||
    dest: /etc/systemd/system/ipfs-cluster-follow.service
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs-cluster-follow
 | 
			
		||||
 | 
			
		||||
- name: Create ipfs service
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: ipfs.service.j2
 | 
			
		||||
    dest: /etc/systemd/system/ipfs.service
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
 | 
			
		||||
- name: Check current value of Routing.AcceleratedDHTClient
 | 
			
		||||
  ansible.builtin.command: ipfs config Routing.AcceleratedDHTClient
 | 
			
		||||
  register: ipfs_dht_config
 | 
			
		||||
  changed_when: false # this never changes things, it only gathers data
 | 
			
		||||
 | 
			
		||||
- name: Enable IPFS Routing.AcceleratedDHTClient
 | 
			
		||||
  ansible.builtin.command: ipfs config --json Routing.AcceleratedDHTClient true
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
  when: ipfs_dht_config.stdout != "true"
 | 
			
		||||
  changed_when: true
 | 
			
		||||
 | 
			
		||||
- name: Create IPFS directory
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    dest: /home/ipfs/.ipfs
 | 
			
		||||
    owner: ipfs
 | 
			
		||||
    group: ipfs
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
 | 
			
		||||
- name: Check if IPFS config exists
 | 
			
		||||
  ansible.builtin.stat:
 | 
			
		||||
    path: /home/ipfs/.ipfs/config
 | 
			
		||||
  register: ipfs_config
 | 
			
		||||
 | 
			
		||||
- name: Initialize IPFS
 | 
			
		||||
  ansible.builtin.command: /usr/local/bin/ipfs init
 | 
			
		||||
  become: true
 | 
			
		||||
  become_user: ipfs
 | 
			
		||||
  args:
 | 
			
		||||
    chdir: /home/ipfs
 | 
			
		||||
  when: not ipfs_config.stat.exists
 | 
			
		||||
  changed_when: true  # Explicitly mark this as a change when it runs
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
 | 
			
		||||
## @todo enable once we get gluster working
 | 
			
		||||
# - name: Symlink IPFS blocks directory to gluster brick
 | 
			
		||||
#   ansible.builtin.file:
 | 
			
		||||
#     src: /home/ipfs/.ipfs/blocks
 | 
			
		||||
#     dest: /mnt/g1/.ipfs/blocks
 | 
			
		||||
#     owner: ipfs
 | 
			
		||||
#     group: ipfs
 | 
			
		||||
#     state: link
 | 
			
		||||
#   notify:
 | 
			
		||||
#     - Restart ipfs
 | 
			
		||||
# This should always be last
 | 
			
		||||
- name: Configure ipfs
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: config.yml
 | 
			
		||||
 | 
			
		||||
							
								
								
									
ansible/roles/ipfs/tasks/system.yml (new file, 44 lines)
@@ -0,0 +1,44 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Configure firewall (UDP & TCP)
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    rule: allow
 | 
			
		||||
    port: "{{ item }}"
 | 
			
		||||
    proto: any
 | 
			
		||||
  loop:
 | 
			
		||||
    - 4001
 | 
			
		||||
    - 24007
 | 
			
		||||
    - 24008
 | 
			
		||||
 | 
			
		||||
- name: Configure firewall (TCP)
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    rule: allow
 | 
			
		||||
    port: "{{ item }}"
 | 
			
		||||
    proto: tcp
 | 
			
		||||
  loop:
 | 
			
		||||
    - 29152:65535
 | 
			
		||||
 | 
			
		||||
- name: Create ipfs group
 | 
			
		||||
  ansible.builtin.group:
 | 
			
		||||
    name: ipfs
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Create ipfs user
 | 
			
		||||
  ansible.builtin.user:
 | 
			
		||||
    name: ipfs
 | 
			
		||||
    group: ipfs
 | 
			
		||||
    create_home: true
 | 
			
		||||
    home: /home/ipfs
 | 
			
		||||
    system: true
 | 
			
		||||
 | 
			
		||||
# @see https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes
 | 
			
		||||
- name: Set sysctl values for net.core.rmem_max and wmem_max
 | 
			
		||||
  ansible.posix.sysctl:
 | 
			
		||||
    name: "{{ item.name }}"
 | 
			
		||||
    value: "{{ item.value }}"
 | 
			
		||||
    state: present
 | 
			
		||||
    reload: true
 | 
			
		||||
  loop:
 | 
			
		||||
    - { name: "net.core.rmem_max", value: "7500000" }
 | 
			
		||||
    - { name: "net.core.wmem_max", value: "7500000" }
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart ipfs
 | 
			
		||||
@@ -3,7 +3,7 @@ Description=ipfs-cluster-follow
 | 
			
		||||
 | 
			
		||||
[Service]
 | 
			
		||||
Type=simple
 | 
			
		||||
Environment=CLUSTER_PEERNAME="{{cluster_peername}}" 
 | 
			
		||||
Environment=CLUSTER_PEERNAME="{{ ipfs_cluster_peername }}" 
 | 
			
		||||
ExecStart=/usr/local/bin/ipfs-cluster-follow futureporn.net run --init https://futureporn.net/api/service.json
 | 
			
		||||
User=ipfs
 | 
			
		||||
Restart=always
 | 
			
		||||
 | 
			
		||||
							
								
								
									
ansible/roles/ipfs/templates/ipfs-config.json.j2 (new file, empty)
@@ -0,0 +1,12 @@
 | 
			
		||||
[Unit]
 | 
			
		||||
Description=IPFS Repo Migrations
 | 
			
		||||
 | 
			
		||||
[Service]
 | 
			
		||||
Type=oneshot
 | 
			
		||||
Environment=IPFS_PATH={{ ipfs_path }}
 | 
			
		||||
ExecStart=/usr/local/bin/fs-repo-migrations -y
 | 
			
		||||
User=ipfs
 | 
			
		||||
 | 
			
		||||
[Install]
 | 
			
		||||
WantedBy=multi-user.target
 | 
			
		||||
 | 
			
		||||
@@ -3,7 +3,8 @@ Description=IPFS Daemon
 | 
			
		||||
 | 
			
		||||
[Service]
 | 
			
		||||
Type=simple
 | 
			
		||||
Environment=IPFS_PATH=/home/ipfs/.ipfs
 | 
			
		||||
Environment=IPFS_PATH={{ ipfs_path }}
 | 
			
		||||
Environment=IPFS_TELEMETRY=off
 | 
			
		||||
ExecStart=/usr/local/bin/ipfs daemon
 | 
			
		||||
User=ipfs
 | 
			
		||||
Restart=always
 | 
			
		||||
@ -11,4 +12,3 @@ RestartSec=10
 | 
			
		||||
 | 
			
		||||
[Install]
 | 
			
		||||
WantedBy=multi-user.target
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
0  ansible/roles/komodo/README.md  Normal file
29  ansible/roles/komodo/tasks/main.yml  Normal file
							@ -0,0 +1,29 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Ensure komodo directory exists
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    path: /opt/komodo
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
 | 
			
		||||
- name: Get docker compose file
 | 
			
		||||
  ansible.builtin.get_url:
 | 
			
		||||
    url: https://raw.githubusercontent.com/moghtech/komodo/main/compose/ferretdb.compose.yaml
 | 
			
		||||
    dest: /opt/komodo
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
 | 
			
		||||
- name: Get .env file
 | 
			
		||||
  ansible.builtin.get_url:
 | 
			
		||||
    url: https://raw.githubusercontent.com/moghtech/komodo/main/compose/compose.env
 | 
			
		||||
    dest: /opt/komodo
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
 | 
			
		||||
# We need to use lineinfile to set the required values in the downloaded compose.env.
# Exactly which keys are needed is not recorded in this commit; KOMODO_HOST below is an assumed example.
- name: Set config
  ansible.builtin.lineinfile:
    path: /opt/komodo/compose.env
    regexp: "^KOMODO_HOST="
    line: "KOMODO_HOST={{ komodo_host }}"

# TODO: the "Run Komodo core" step was left as an empty stub; see the commented sketch at the end of this file.
 | 
			
		||||
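# Minimal sketch (an assumption, not the author's implementation) of what the unfinished
# "Run Komodo core" step could look like, using community.docker.docker_compose_v2 against
# the files downloaded above:
# - name: Run Komodo core
#   community.docker.docker_compose_v2:
#     project_src: /opt/komodo
#     files:
#       - ferretdb.compose.yaml
#     state: present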
119  ansible/roles/komodo/templates/compose.env.j2  Normal file
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# KOMODO_DISABLE_USER_REGISTRATION=true
 | 
			
		||||
# KOMODO_ENABLE_NEW_USERS=false
 | 
			
		||||
# KOMODO_DISABLE_NON_ADMIN_CREATE=true
 | 
			
		||||
# KOMODO_HOST=https://komodo.future.porn
 | 
			
		||||
# KOMODO_DB_USERNAME=admin
 | 
			
		||||
# KOMODO_DB_PASSWORD=admin
 | 
			
		||||
# KOMODO_PASSKEY=a_random_passkey
 | 
			
		||||
 | 
			
		||||
####################################
 | 
			
		||||
# 🦎 KOMODO COMPOSE - VARIABLES 🦎 #
 | 
			
		||||
####################################
 | 
			
		||||
 | 
			
		||||
## These compose variables can be used with all Komodo deployment options.
 | 
			
		||||
## Pass these variables to the compose up command using `--env-file komodo/compose.env`.
 | 
			
		||||
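## For example, from /opt/komodo (file names assumed from this role's download tasks):
##   docker compose --env-file compose.env -f ferretdb.compose.yaml up -d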
## Additionally, they are passed to both Komodo Core and Komodo Periphery with `env_file: ./compose.env`,
 | 
			
		||||
## so you can pass any additional environment variables to Core / Periphery directly in this file as well.
 | 
			
		||||
 | 
			
		||||
## Stick to a specific version, or use `latest`
 | 
			
		||||
COMPOSE_KOMODO_IMAGE_TAG=latest
 | 
			
		||||
 | 
			
		||||
## DB credentials
 | 
			
		||||
KOMODO_DB_USERNAME=admin
 | 
			
		||||
KOMODO_DB_PASSWORD=admin
 | 
			
		||||
 | 
			
		||||
## Configure a secure passkey to authenticate between Core / Periphery.
 | 
			
		||||
KOMODO_PASSKEY=a_random_passkey
 | 
			
		||||
 | 
			
		||||
## Set your time zone for schedules
 | 
			
		||||
## https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
 | 
			
		||||
TZ=Etc/UTC
 | 
			
		||||
 | 
			
		||||
#=-------------------------=#
 | 
			
		||||
#= Komodo Core Environment =#
 | 
			
		||||
#=-------------------------=#
 | 
			
		||||
 | 
			
		||||
## Full variable list + descriptions are available here:
 | 
			
		||||
## 🦎 https://github.com/moghtech/komodo/blob/main/config/core.config.toml 🦎
 | 
			
		||||
 | 
			
		||||
## Note. Secret variables also support `${VARIABLE}_FILE` syntax to pass docker compose secrets.
 | 
			
		||||
## Docs: https://docs.docker.com/compose/how-tos/use-secrets/#examples
 | 
			
		||||
 | 
			
		||||
## Used for Oauth / Webhook url suggestion / Caddy reverse proxy.
 | 
			
		||||
KOMODO_HOST="{{ komodo_host }}"
 | 
			
		||||
## Displayed in the browser tab.
 | 
			
		||||
KOMODO_TITLE=fp Komodo
 | 
			
		||||
## Create a server matching this address as the "first server".
 | 
			
		||||
## Use `https://host.docker.internal:8120` when using systemd-managed Periphery.
 | 
			
		||||
KOMODO_FIRST_SERVER=https://periphery:8120
 | 
			
		||||
## Make all buttons just double-click, rather than the full confirmation dialog.
 | 
			
		||||
KOMODO_DISABLE_CONFIRM_DIALOG=false
 | 
			
		||||
 | 
			
		||||
## Rate at which Komodo polls your servers for
 | 
			
		||||
## status / container status / system stats / alerting.
 | 
			
		||||
## Options: 1-sec, 5-sec, 15-sec, 1-min, 5-min, 15-min
 | 
			
		||||
## Default: 15-sec
 | 
			
		||||
KOMODO_MONITORING_INTERVAL="15-sec"
 | 
			
		||||
## Interval at which to poll Resources for any updates / automated actions.
 | 
			
		||||
## Options: 15-min, 1-hr, 2-hr, 6-hr, 12-hr, 1-day
 | 
			
		||||
## Default: 1-hr
 | 
			
		||||
KOMODO_RESOURCE_POLL_INTERVAL="1-hr"
 | 
			
		||||
 | 
			
		||||
## Used to auth incoming webhooks. Alt: KOMODO_WEBHOOK_SECRET_FILE
 | 
			
		||||
KOMODO_WEBHOOK_SECRET=a_random_secret
 | 
			
		||||
## Used to generate jwt. Alt: KOMODO_JWT_SECRET_FILE
 | 
			
		||||
KOMODO_JWT_SECRET=a_random_jwt_secret
 | 
			
		||||
## Time to live for jwt tokens.
 | 
			
		||||
## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk
 | 
			
		||||
KOMODO_JWT_TTL="1-day"
 | 
			
		||||
 | 
			
		||||
## Enable login with username + password.
 | 
			
		||||
KOMODO_LOCAL_AUTH=true
 | 
			
		||||
## Disable new user signups.
 | 
			
		||||
KOMODO_DISABLE_USER_REGISTRATION=false
 | 
			
		||||
## All new logins are auto enabled
 | 
			
		||||
KOMODO_ENABLE_NEW_USERS=false
 | 
			
		||||
## Disable non-admins from creating new resources.
 | 
			
		||||
KOMODO_DISABLE_NON_ADMIN_CREATE=false
 | 
			
		||||
## Allows all users to have Read level access to all resources.
 | 
			
		||||
KOMODO_TRANSPARENT_MODE=false
 | 
			
		||||
 | 
			
		||||
## Prettier logging with empty lines between logs
 | 
			
		||||
KOMODO_LOGGING_PRETTY=false
 | 
			
		||||
## More human readable logging of startup config (multi-line)
 | 
			
		||||
KOMODO_PRETTY_STARTUP_CONFIG=false
 | 
			
		||||
 | 
			
		||||
## OIDC Login
 | 
			
		||||
KOMODO_OIDC_ENABLED=false
 | 
			
		||||
## Must reachable from Komodo Core container
 | 
			
		||||
# KOMODO_OIDC_PROVIDER=https://oidc.provider.internal/application/o/komodo
 | 
			
		||||
## Change the host to one reachable by users (optional if it is the same as above).
 | 
			
		||||
## DO NOT include the `path` part of the URL.
 | 
			
		||||
# KOMODO_OIDC_REDIRECT_HOST=https://oidc.provider.external
 | 
			
		||||
## Your OIDC client id
 | 
			
		||||
# KOMODO_OIDC_CLIENT_ID= # Alt: KOMODO_OIDC_CLIENT_ID_FILE
 | 
			
		||||
## Your OIDC client secret.
 | 
			
		||||
## If your provider supports PKCE flow, this can be omitted.
 | 
			
		||||
# KOMODO_OIDC_CLIENT_SECRET= # Alt: KOMODO_OIDC_CLIENT_SECRET_FILE
 | 
			
		||||
## Make usernames the full email.
 | 
			
		||||
## Note. This does not work for all OIDC providers.
 | 
			
		||||
# KOMODO_OIDC_USE_FULL_EMAIL=true
 | 
			
		||||
## Add additional trusted audiences for token claims verification.
 | 
			
		||||
## Supports comma separated list, and passing with _FILE (for compose secrets).
 | 
			
		||||
# KOMODO_OIDC_ADDITIONAL_AUDIENCES=abc,123 # Alt: KOMODO_OIDC_ADDITIONAL_AUDIENCES_FILE
 | 
			
		||||
 | 
			
		||||
## Github Oauth
 | 
			
		||||
KOMODO_GITHUB_OAUTH_ENABLED=false
 | 
			
		||||
# KOMODO_GITHUB_OAUTH_ID= # Alt: KOMODO_GITHUB_OAUTH_ID_FILE
 | 
			
		||||
# KOMODO_GITHUB_OAUTH_SECRET= # Alt: KOMODO_GITHUB_OAUTH_SECRET_FILE
 | 
			
		||||
 | 
			
		||||
## Google Oauth
 | 
			
		||||
KOMODO_GOOGLE_OAUTH_ENABLED=false
 | 
			
		||||
# KOMODO_GOOGLE_OAUTH_ID= # Alt: KOMODO_GOOGLE_OAUTH_ID_FILE
 | 
			
		||||
# KOMODO_GOOGLE_OAUTH_SECRET= # Alt: KOMODO_GOOGLE_OAUTH_SECRET_FILE
 | 
			
		||||
 | 
			
		||||
## Aws - Used to launch Builder instances.
 | 
			
		||||
KOMODO_AWS_ACCESS_KEY_ID= # Alt: KOMODO_AWS_ACCESS_KEY_ID_FILE
 | 
			
		||||
KOMODO_AWS_SECRET_ACCESS_KEY= # Alt: KOMODO_AWS_SECRET_ACCESS_KEY_FILE
 | 
			
		||||
95  ansible/roles/komodo/templates/dockerfile.yml.j2  Normal file
							@ -0,0 +1,95 @@
 | 
			
		||||
###################################
 | 
			
		||||
# 🦎 KOMODO COMPOSE - FERRETDB 🦎 #
 | 
			
		||||
###################################
 | 
			
		||||
 | 
			
		||||
## This compose file will deploy:
 | 
			
		||||
##   1. Postgres + FerretDB Mongo adapter (https://www.ferretdb.com)
 | 
			
		||||
##   2. Komodo Core
 | 
			
		||||
##   3. Komodo Periphery
 | 
			
		||||
 | 
			
		||||
services:
 | 
			
		||||
  postgres:
 | 
			
		||||
    # Recommended: Pin to a specific version
 | 
			
		||||
    # https://github.com/FerretDB/documentdb/pkgs/container/postgres-documentdb
 | 
			
		||||
    image: ghcr.io/ferretdb/postgres-documentdb
 | 
			
		||||
    labels:
 | 
			
		||||
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
 | 
			
		||||
    restart: unless-stopped
 | 
			
		||||
    # ports:
 | 
			
		||||
    #   - 5432:5432
 | 
			
		||||
    volumes:
 | 
			
		||||
      - postgres-data:/var/lib/postgresql/data
 | 
			
		||||
    environment:
 | 
			
		||||
      POSTGRES_USER: ${KOMODO_DB_USERNAME}
 | 
			
		||||
      POSTGRES_PASSWORD: ${KOMODO_DB_PASSWORD}
 | 
			
		||||
      POSTGRES_DB: postgres
 | 
			
		||||
 | 
			
		||||
  ferretdb:
 | 
			
		||||
    # Recommended: Pin to a specific version
 | 
			
		||||
    # https://github.com/FerretDB/FerretDB/pkgs/container/ferretdb
 | 
			
		||||
    image: ghcr.io/ferretdb/ferretdb
 | 
			
		||||
    labels:
 | 
			
		||||
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
 | 
			
		||||
    restart: unless-stopped
 | 
			
		||||
    depends_on:
 | 
			
		||||
      - postgres
 | 
			
		||||
    # ports:
 | 
			
		||||
    #   - 27017:27017
 | 
			
		||||
    volumes:
 | 
			
		||||
      - ferretdb-state:/state
 | 
			
		||||
    environment:
 | 
			
		||||
      FERRETDB_POSTGRESQL_URL: postgres://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@postgres:5432/postgres
 | 
			
		||||
  
 | 
			
		||||
  core:
 | 
			
		||||
    image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
 | 
			
		||||
    labels:
 | 
			
		||||
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
 | 
			
		||||
    restart: unless-stopped
 | 
			
		||||
    depends_on:
 | 
			
		||||
      - ferretdb
 | 
			
		||||
    ports:
 | 
			
		||||
      - 9120:9120
 | 
			
		||||
    env_file: ./compose.env
 | 
			
		||||
    environment:
 | 
			
		||||
      KOMODO_DATABASE_ADDRESS: ferretdb:27017
 | 
			
		||||
      KOMODO_DATABASE_USERNAME: ${KOMODO_DB_USERNAME}
 | 
			
		||||
      KOMODO_DATABASE_PASSWORD: ${KOMODO_DB_PASSWORD}
 | 
			
		||||
    volumes:
 | 
			
		||||
      ## Core cache for repos for latest commit hash / contents
 | 
			
		||||
      - repo-cache:/repo-cache
 | 
			
		||||
      ## Store sync files on server
 | 
			
		||||
      # - /path/to/syncs:/syncs
 | 
			
		||||
      ## Optionally mount a custom core.config.toml
 | 
			
		||||
      # - /path/to/core.config.toml:/config/config.toml
 | 
			
		||||
    ## Allows for systemd Periphery connection at 
 | 
			
		||||
    ## "http://host.docker.internal:8120"
 | 
			
		||||
    # extra_hosts:
 | 
			
		||||
    #   - host.docker.internal:host-gateway
 | 
			
		||||
 | 
			
		||||
  ## Deploy Periphery container using this block,
 | 
			
		||||
  ## or deploy the Periphery binary with systemd using 
 | 
			
		||||
  ## https://github.com/moghtech/komodo/tree/main/scripts
 | 
			
		||||
  periphery:
 | 
			
		||||
    image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
 | 
			
		||||
    labels:
 | 
			
		||||
      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
 | 
			
		||||
    restart: unless-stopped
 | 
			
		||||
    env_file: ./compose.env
 | 
			
		||||
    volumes:
 | 
			
		||||
      ## Mount external docker socket
 | 
			
		||||
      - /var/run/docker.sock:/var/run/docker.sock
 | 
			
		||||
      ## Allow Periphery to see processes outside of container
 | 
			
		||||
      - /proc:/proc
 | 
			
		||||
      ## Specify the Periphery agent root directory.
 | 
			
		||||
      ## Must be the same inside and outside the container,
 | 
			
		||||
      ## or docker will get confused. See https://github.com/moghtech/komodo/discussions/180.
 | 
			
		||||
      ## Default: /etc/komodo.
 | 
			
		||||
      - ${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}:${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}
 | 
			
		||||
 | 
			
		||||
volumes:
 | 
			
		||||
  # Postgres
 | 
			
		||||
  postgres-data:
 | 
			
		||||
  # FerretDB
 | 
			
		||||
  ferretdb-state:
 | 
			
		||||
  # Core
 | 
			
		||||
  repo-cache:
 | 
			
		||||
@ -1,23 +0,0 @@
 | 
			
		||||
future.porn {
 | 
			
		||||
	root * /usr/share/futureporn
 | 
			
		||||
	file_server
 | 
			
		||||
 | 
			
		||||
	# Define the upstream servers for load balancing
 | 
			
		||||
	reverse_proxy {% for host in groups['our'] %}{{ hostvars[host]['internal_ip'] }}:{{our_server_port}} {% endfor %} {
 | 
			
		||||
		# Load balancing policy (optional, defaults to "random")
 | 
			
		||||
		lb_policy round_robin
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
		# Health checks (optional)
 | 
			
		||||
		health_uri /health
 | 
			
		||||
		health_interval 10s
 | 
			
		||||
		health_timeout 5s
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	handle_errors {
 | 
			
		||||
		respond "💥 Error ~ {err.status_code} {err.status_text}"
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
21  ansible/roles/loadbalancer/templates/Caddyfile.j2  Normal file
							@ -0,0 +1,21 @@
 | 
			
		||||
{% set sites = ['future.porn', 'pgadmin.sbtp.xyz', 'rssapp.sbtp.xyz'] %}
 | 
			
		||||
 | 
			
		||||
{% for site in sites %}
 | 
			
		||||
{{ site }} {
 | 
			
		||||
 | 
			
		||||
	# Define the upstream servers (docker swarm nodes) for load balancing
 | 
			
		||||
	reverse_proxy {% for host in groups['our'] %}{{ hostvars[host]['internal_ip'] }}:{{ our_server_port }} {% endfor %} {
 | 
			
		||||
		# Load balancing policy (optional, defaults to "random")
 | 
			
		||||
		lb_policy least_connections
 | 
			
		||||
 | 
			
		||||
		# Health checks
 | 
			
		||||
		health_uri /health
 | 
			
		||||
		health_interval 10s
 | 
			
		||||
		health_timeout 5s
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	handle_errors {
 | 
			
		||||
		respond "💥 Error; Please try again later. Code {err.status_code} | {err.status_text}."
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
{% endfor %}
 | 
			
		||||
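# Rendered shape for one site (illustration only — IPs and our_server_port come from the inventory):
#   future.porn {
#       reverse_proxy 10.2.112.10:3000 10.2.112.11:3000 {
#           lb_policy least_connections
#           health_uri /health
#       }
#   }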
3  ansible/roles/our-server/defaults/main.yml  Normal file
							@ -0,0 +1,3 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
our_caddy_image: caddy:2
 | 
			
		||||
8  ansible/roles/our-server/handlers/main.yml  Normal file
							@ -0,0 +1,8 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Restart app
 | 
			
		||||
  ansible.builtin.systemd_service:
 | 
			
		||||
    name: our-server
 | 
			
		||||
    state: restarted
 | 
			
		||||
    enabled: true
 | 
			
		||||
    daemon_reload: true
 | 
			
		||||
198  ansible/roles/our-server/tasks/fastify.yml  Normal file
							@ -0,0 +1,198 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Create futureporn group
 | 
			
		||||
  ansible.builtin.group:
 | 
			
		||||
    name: futureporn
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Create futureporn user
 | 
			
		||||
  ansible.builtin.user:
 | 
			
		||||
    name: futureporn
 | 
			
		||||
    group: futureporn
 | 
			
		||||
    create_home: true
 | 
			
		||||
    home: /home/futureporn
 | 
			
		||||
    system: true
 | 
			
		||||
 | 
			
		||||
- name: Ensure futureporn directory exists
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    path: /opt/futureporn
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart app
 | 
			
		||||
 | 
			
		||||
- name: Ensure config directory exists
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    path: /usr/local/etc/futureporn/our
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart app
 | 
			
		||||
 | 
			
		||||
- name: Generate .env file
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: env.j2
 | 
			
		||||
    dest: "{{ env_file }}"
 | 
			
		||||
    mode: "0600"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart app
 | 
			
		||||
 | 
			
		||||
- name: Download Futureporn source code
 | 
			
		||||
  ansible.builtin.git:
 | 
			
		||||
    repo: https://gitea.futureporn.net/futureporn/fp
 | 
			
		||||
    dest: /opt/futureporn
 | 
			
		||||
    version: "{{ our_commit }}"
 | 
			
		||||
    update: true
 | 
			
		||||
  tags:
 | 
			
		||||
    - our
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart app
 | 
			
		||||
 | 
			
		||||
- name: Install Our packages based on package.json
 | 
			
		||||
  community.general.npm:
 | 
			
		||||
    path: "{{ app_dir }}"
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- name: Install passlib
 | 
			
		||||
  ansible.builtin.pip:
 | 
			
		||||
    name: passlib # dependency of Ansible's password_hash filter
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Create our-server service
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: our-server.service.j2
 | 
			
		||||
    dest: /etc/systemd/system/our-server.service
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart app
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# - name: Template Caddyfile
 | 
			
		||||
#   ansible.builtin.template:
 | 
			
		||||
#     src: Caddyfile.j2
 | 
			
		||||
#     dest: /opt/our/Caddyfile
 | 
			
		||||
#     mode: "0600"
 | 
			
		||||
#   notify:
 | 
			
		||||
#     - Restart caddy
 | 
			
		||||
 | 
			
		||||
# - name: Template Docker Compose file
 | 
			
		||||
#   ansible.builtin.template:
 | 
			
		||||
#     src: docker-compose.yml.j2
 | 
			
		||||
#     dest: /opt/our/docker-compose.yml
 | 
			
		||||
#     mode: "0644"
 | 
			
		||||
#   notify:
 | 
			
		||||
#     - Restart app
 | 
			
		||||
 | 
			
		||||
- name: Set default UFW policy to deny incoming
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    state: enabled
 | 
			
		||||
    policy: deny
 | 
			
		||||
    direction: incoming
 | 
			
		||||
 | 
			
		||||
- name: Configure firewall
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    rule: allow
 | 
			
		||||
    port: "{{ item }}"
 | 
			
		||||
    proto: tcp
 | 
			
		||||
  loop:
 | 
			
		||||
    - 443
 | 
			
		||||
    - 80
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- name: Allow /20 subnet access
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    rule: allow
 | 
			
		||||
    port: "{{ item }}"
 | 
			
		||||
    proto: tcp
 | 
			
		||||
    from: 10.2.112.0/20
 | 
			
		||||
  loop:
 | 
			
		||||
    - 3000
 | 
			
		||||
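# Reading of the firewall rules above (not stated in the role): 80/443 stay open publicly for
# the reverse proxy, while the app port 3000 is reachable only from the private 10.2.112.0/20
# range, so only the load balancer and VPC peers can hit the Fastify server directly.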
 | 
			
		||||
# Bright app Reference
 | 
			
		||||
# ---
 | 
			
		||||
 | 
			
		||||
# # Terraform Vultr provider doesn't have a VFS resource/datasource yet.
 | 
			
		||||
# # This is a workaround for that missing feature.
 | 
			
		||||
# #
 | 
			
		||||
# # @see https://github.com/vultr/terraform-provider-vultr/issues/560
 | 
			
		||||
# - name: Get the VFS id
 | 
			
		||||
#   ansible.builtin.uri:
 | 
			
		||||
#     url: https://api.vultr.com/v2/vfs
 | 
			
		||||
#     method: GET
 | 
			
		||||
#     status_code: 200
 | 
			
		||||
#     headers:
 | 
			
		||||
#       Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
 | 
			
		||||
#   register: vfs_list
 | 
			
		||||
 | 
			
		||||
# - name: Get VFS variables
 | 
			
		||||
#   ansible.builtin.set_fact:
 | 
			
		||||
#     our_vfs_id: "{{ vfs_list.json.vfs | selectattr('label', 'equalto', 'our') | map(attribute='id') | first }}"
 | 
			
		||||
 | 
			
		||||
# - name: Debug the our VFS id
 | 
			
		||||
#   ansible.builtin.debug:
 | 
			
		||||
#     msg: "The VFS ID for 'our' is {{ our_vfs_id }}"
 | 
			
		||||
 | 
			
		||||
# - name: Attach VFS to Vultr instance
 | 
			
		||||
#   ansible.builtin.uri:
 | 
			
		||||
#     url: https://api.vultr.com/v2/vfs/{{ our_vfs_id }}/attachments/{{ hostvars[inventory_hostname]['vultr_instance_id'] }}
 | 
			
		||||
#     method: PUT
 | 
			
		||||
#     status_code:
 | 
			
		||||
#       - 200
 | 
			
		||||
#       - 201
 | 
			
		||||
#       - 409
 | 
			
		||||
#     headers:
 | 
			
		||||
#       Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
 | 
			
		||||
#   register: vfs_attach
 | 
			
		||||
#   changed_when:
 | 
			
		||||
#     - vfs_attach.json is defined
 | 
			
		||||
#     - "'state' in vfs_attach.json"
 | 
			
		||||
#     - vfs_attach.json.state == "ATTACHED"
 | 
			
		||||
#   notify:
 | 
			
		||||
#     - Mount vfs
 | 
			
		||||
#     - Restart our
 | 
			
		||||
 | 
			
		||||
# - name: Debug vfs_attach
 | 
			
		||||
#   ansible.builtin.debug:
 | 
			
		||||
#     var: vfs_attach
 | 
			
		||||
 | 
			
		||||
# - name: Get the VFS mount_tag
 | 
			
		||||
#   ansible.builtin.set_fact:
 | 
			
		||||
#     vfs_mount_tag: "{{ vfs_attach.json.mount_tag | default('') }}"
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# - name: Setup docker container
 | 
			
		||||
#   community.docker.docker_container:
 | 
			
		||||
#     name: our
 | 
			
		||||
#     image: gitea.futureporn.net/futureporn/our:latest
 | 
			
		||||
#     pull: always
 | 
			
		||||
#     state: started
 | 
			
		||||
#     ports:
 | 
			
		||||
#       - "4000:4000"
 | 
			
		||||
#     volumes:
 | 
			
		||||
#       - "/mnt/vfs/futureporn:/mnt/vfs/futureporn"
 | 
			
		||||
#     env:
 | 
			
		||||
#       DB_HOST: "{{ hostvars['fp-db-0']['internal_ip'] }}"
 | 
			
		||||
#       DB_USER: "{{ lookup('dotenv', 'DB_USER', file='../.env') }}"
 | 
			
		||||
#       DB_NAME: "our"
 | 
			
		||||
#       DB_PORT: "5432"
 | 
			
		||||
#       DB_PASS: "{{ lookup('dotenv', 'DB_PASS', file='../.env') }}"
 | 
			
		||||
#       MIX_ENV: prod
 | 
			
		||||
#       PUBLIC_S3_ENDPOINT: https://futureporn-b2.b-cdn.net
 | 
			
		||||
#       PATREON_REDIRECT_URI: https://our.futureporn.net/auth/patreon/callback
 | 
			
		||||
#       SITE_URL: https://our.futureporn.net
 | 
			
		||||
#       PHX_HOST: our.futureporn.net
 | 
			
		||||
#       AWS_BUCKET: futureporn
 | 
			
		||||
#       AWS_REGION: us-west-000
 | 
			
		||||
#       AWS_HOST: s3.us-west-000.backblazeb2.com
 | 
			
		||||
#       SECRET_KEY_BASE: "{{ lookup('dotenv', 'SECRET_KEY_BASE', file='../.env') }}"
 | 
			
		||||
#       PATREON_CLIENT_SECRET: "{{ lookup('dotenv', 'PATREON_CLIENT_SECRET', file='../.env') }}"
 | 
			
		||||
#       PATREON_CLIENT_ID: "{{ lookup('dotenv', 'PATREON_CLIENT_ID', file='../.env') }}"
 | 
			
		||||
#       AWS_ACCESS_KEY_ID: "{{ lookup('dotenv', 'AWS_ACCESS_KEY_ID', file='../.env') }}"
 | 
			
		||||
#       AWS_SECRET_ACCESS_KEY: "{{ lookup('dotenv', 'AWS_SECRET_ACCESS_KEY', file='../.env') }}"
 | 
			
		||||
#       TRACKER_HELPER_ACCESSLIST_URL: https://tracker.futureporn.net/accesslist
 | 
			
		||||
#       TRACKER_HELPER_USERNAME: "{{ lookup('dotenv', 'TRACKER_HELPER_USERNAME', file='../.env') }}"
 | 
			
		||||
#       TRACKER_HELPER_PASSWORD: "{{ lookup('dotenv', 'TRACKER_HELPER_PASSWORD', file='../.env') }}"
 | 
			
		||||
#       TRACKER_URL: https://tracker.futureporn.net:6969
 | 
			
		||||
#       CACHE_DIR: /mnt/vfs/futureporn # we use Vultr File System to share cache among all Phoenix instances
 | 
			
		||||
6  ansible/roles/our-server/tasks/main.yml  Normal file
							@ -0,0 +1,6 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Setup fastify app
 | 
			
		||||
  ansible.builtin.include_tasks: fastify.yml
 | 
			
		||||
  tags:
 | 
			
		||||
    - fastify
 | 
			
		||||
33  ansible/roles/our-server/templates/env.j2  Normal file
							@ -0,0 +1,33 @@
 | 
			
		||||
ORIGIN=https://future.porn
 | 
			
		||||
 | 
			
		||||
COOKIE_SECRET={{ lookup('dotenv', 'COOKIE_SECRET', file='../../../../.env.production')}}
 | 
			
		||||
DB_USER={{ lookup('dotenv', 'DB_USER', file='../../../../.env.production')}}
 | 
			
		||||
DB_PASSWORD={{ lookup('dotenv', 'DB_PASSWORD', file='../../../../.env.production')}}
 | 
			
		||||
DB_NAME=future_porn
 | 
			
		||||
 | 
			
		||||
CDN_ORIGIN=https://fp-usc.b-cdn.net
 | 
			
		||||
CDN_TOKEN_SECRET={{ lookup('dotenv', 'CDN_TOKEN_SECRET', file='../../../../.env.production')}}
 | 
			
		||||
NODE_ENV=production
 | 
			
		||||
 | 
			
		||||
DATABASE_URL={{ lookup('dotenv', 'DATABASE_URL', file='../../../../.env.production')}}
 | 
			
		||||
PGADMIN_DEFAULT_EMAIL={{ lookup('dotenv', 'PGADMIN_DEFAULT_EMAIL', file='../../../../.env.production')}}
 | 
			
		||||
PGADMIN_DEFAULT_PASSWORD={{ lookup('dotenv', 'PGADMIN_DEFAULT_PASSWORD', file='../../../../.env.production')}}
 | 
			
		||||
 | 
			
		||||
PATREON_CLIENT_ID={{ lookup('dotenv', 'PATREON_CLIENT_ID', file='../../../../.env.production')}}
 | 
			
		||||
PATREON_CLIENT_SECRET={{ lookup('dotenv', 'PATREON_CLIENT_SECRET', file='../../../../.env.production')}}
 | 
			
		||||
 | 
			
		||||
PATREON_API_ORIGIN=https://www.patreon.com
 | 
			
		||||
PATREON_AUTHORIZE_PATH=/oauth2/authorize
 | 
			
		||||
PATREON_TOKEN_PATH=/api/oauth2/token
 | 
			
		||||
 | 
			
		||||
S3_BUCKET=fp-usc
 | 
			
		||||
S3_REGION=us-west-000
 | 
			
		||||
S3_KEY_ID={{ lookup('dotenv', 'S3_KEY_ID', file='../../../../.env.production')}}
 | 
			
		||||
S3_APPLICATION_KEY={{ lookup('dotenv', 'S3_APPLICATION_KEY', file='../../../../.env.production')}}
 | 
			
		||||
S3_ENDPOINT=https://s3.us-west-000.backblazeb2.com
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
CACHE_ROOT='/mnt/vfs/futureporn/our'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
18  ansible/roles/our-server/templates/our-server.service.j2  Normal file
							@ -0,0 +1,18 @@
 | 
			
		||||
[Unit]
 | 
			
		||||
Description=FuturePorn Our Server
 | 
			
		||||
After=network.target
 | 
			
		||||
 | 
			
		||||
[Service]
 | 
			
		||||
Type=simple
 | 
			
		||||
WorkingDirectory={{ app_dir }}
 | 
			
		||||
ExecStart=/usr/bin/env /usr/bin/npx tsx src/index.ts
 | 
			
		||||
#ExecStart=/usr/bin/env /usr/bin/npx dotenvx run -f {{ env_file }} -- npx tsx src/index.ts
 | 
			
		||||
Restart=always
 | 
			
		||||
RestartSec=5
 | 
			
		||||
User={{ app_user }}
 | 
			
		||||
EnvironmentFile={{ env_file }}
 | 
			
		||||
 | 
			
		||||
[Install]
 | 
			
		||||
WantedBy=multi-user.target
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
16  ansible/roles/our-server/templates/our-worker.service.j2  Normal file
							@ -0,0 +1,16 @@
 | 
			
		||||
[Unit]
 | 
			
		||||
Description=FuturePorn Our Worker
 | 
			
		||||
After=network.target
 | 
			
		||||
 | 
			
		||||
[Service]
 | 
			
		||||
Type=simple
 | 
			
		||||
WorkingDirectory={{ app_dir }}
 | 
			
		||||
ExecStart=/usr/bin/env NODE_ENV=production /usr/bin/node dist/worker.js
 | 
			
		||||
Restart=on-failure
 | 
			
		||||
User={{ app_user }}
 | 
			
		||||
EnvironmentFile={{ env_file }}
 | 
			
		||||
StandardOutput=journal
 | 
			
		||||
StandardError=journal
 | 
			
		||||
 | 
			
		||||
[Install]
 | 
			
		||||
WantedBy=multi-user.target
 | 
			
		||||
7  ansible/roles/our-server/vars/main.yml  Normal file
							@ -0,0 +1,7 @@
 | 
			
		||||
---
 | 
			
		||||
app_user: futureporn
 | 
			
		||||
app_dir: /opt/futureporn/services/our
 | 
			
		||||
app_entry: dist/main.js
 | 
			
		||||
env_file: /usr/local/etc/futureporn/our/env
 | 
			
		||||
nodejs_version: "20.x"
 | 
			
		||||
our_commit: main
 | 
			
		||||
2  ansible/roles/our-worker/defaults/main.yml  Normal file
							@ -0,0 +1,2 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
8  ansible/roles/our-worker/handlers/main.yml  Normal file
							@ -0,0 +1,8 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Restart worker
 | 
			
		||||
  ansible.builtin.systemd_service:
 | 
			
		||||
    name: our-worker
 | 
			
		||||
    state: restarted
 | 
			
		||||
    enabled: true
 | 
			
		||||
    daemon_reload: true
 | 
			
		||||
75  ansible/roles/our-worker/tasks/main.yml  Normal file
							@ -0,0 +1,75 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Create futureporn group
 | 
			
		||||
  ansible.builtin.group:
 | 
			
		||||
    name: futureporn
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Create futureporn user
 | 
			
		||||
  ansible.builtin.user:
 | 
			
		||||
    name: futureporn
 | 
			
		||||
    group: futureporn
 | 
			
		||||
    create_home: true
 | 
			
		||||
    home: /home/futureporn
 | 
			
		||||
    system: true
 | 
			
		||||
 | 
			
		||||
- name: Ensure futureporn directory exists
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    path: /opt/futureporn
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart worker
 | 
			
		||||
 | 
			
		||||
- name: Ensure config directory exists
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    path: /usr/local/etc/futureporn/our
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart worker
 | 
			
		||||
 | 
			
		||||
- name: Generate .env file
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: env.j2
 | 
			
		||||
    dest: "{{ env_file }}"
 | 
			
		||||
    mode: "0600"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart worker
 | 
			
		||||
 | 
			
		||||
- name: Download Futureporn source code
 | 
			
		||||
  ansible.builtin.git:
 | 
			
		||||
    repo: https://gitea.futureporn.net/futureporn/fp
 | 
			
		||||
    dest: /opt/futureporn
 | 
			
		||||
    version: "{{ our_commit }}"
 | 
			
		||||
    update: true
 | 
			
		||||
  tags:
 | 
			
		||||
    - our
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart worker
 | 
			
		||||
 | 
			
		||||
- name: Install Our packages based on package.json
 | 
			
		||||
  community.general.npm:
 | 
			
		||||
    path: "{{ app_dir }}"
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- name: Install passlib
 | 
			
		||||
  ansible.builtin.pip:
 | 
			
		||||
    name: passlib # dependency of Ansible's password_hash filter
 | 
			
		||||
    state: present
 | 
			
		||||
 | 
			
		||||
- name: Create our-worker service
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: our-worker.service.j2
 | 
			
		||||
    dest: /etc/systemd/system/our-worker.service
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Restart worker
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- name: Set default UFW policy to deny incoming
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    state: enabled
 | 
			
		||||
    policy: deny
 | 
			
		||||
    direction: incoming
 | 
			
		||||
 | 
			
		||||
30  ansible/roles/our-worker/templates/env.j2  Normal file
							@ -0,0 +1,30 @@
 | 
			
		||||
ORIGIN=https://future.porn
 | 
			
		||||
 | 
			
		||||
COOKIE_SECRET={{ lookup('dotenv', 'COOKIE_SECRET', file='../../../../.env.production')}}
 | 
			
		||||
DB_USER={{ lookup('dotenv', 'DB_USER', file='../../../../.env.production')}}
 | 
			
		||||
DB_PASSWORD={{ lookup('dotenv', 'DB_PASSWORD', file='../../../../.env.production')}}
 | 
			
		||||
DB_NAME=future_porn
 | 
			
		||||
 | 
			
		||||
CDN_ORIGIN=https://fp-usc.b-cdn.net
 | 
			
		||||
CDN_TOKEN_SECRET={{ lookup('dotenv', 'CDN_TOKEN_SECRET', file='../../../../.env.production')}}
 | 
			
		||||
NODE_ENV=production
 | 
			
		||||
 | 
			
		||||
DATABASE_URL={{ lookup('dotenv', 'DATABASE_URL', file='../../../../.env.production')}}
 | 
			
		||||
PGADMIN_DEFAULT_EMAIL={{ lookup('dotenv', 'PGADMIN_DEFAULT_EMAIL', file='../../../../.env.production')}}
 | 
			
		||||
PGADMIN_DEFAULT_PASSWORD={{ lookup('dotenv', 'PGADMIN_DEFAULT_PASSWORD', file='../../../../.env.production')}}
 | 
			
		||||
 | 
			
		||||
PATREON_CLIENT_ID={{ lookup('dotenv', 'PATREON_CLIENT_ID', file='../../../../.env.production')}}
 | 
			
		||||
PATREON_CLIENT_SECRET={{ lookup('dotenv', 'PATREON_CLIENT_SECRET', file='../../../../.env.production')}}
 | 
			
		||||
 | 
			
		||||
PATREON_API_ORIGIN=https://www.patreon.com
 | 
			
		||||
PATREON_AUTHORIZE_PATH=/oauth2/authorize
 | 
			
		||||
PATREON_TOKEN_PATH=/api/oauth2/token
 | 
			
		||||
 | 
			
		||||
S3_BUCKET=fp-usc
 | 
			
		||||
S3_REGION=us-west-000
 | 
			
		||||
S3_KEY_ID={{ lookup('dotenv', 'S3_KEY_ID', file='../../../../.env.production') }}
 | 
			
		||||
S3_APPLICATION_KEY={{ lookup('dotenv', 'S3_APPLICATION_KEY', file='../../../../.env.production')}}
 | 
			
		||||
S3_ENDPOINT=https://s3.us-west-000.backblazeb2.com
 | 
			
		||||
 | 
			
		||||
CACHE_ROOT='/mnt/vfs/futureporn/our'
 | 
			
		||||
 | 
			
		||||
14  ansible/roles/our-worker/templates/our-worker.service.j2  Normal file
							@ -0,0 +1,14 @@
 | 
			
		||||
[Unit]
 | 
			
		||||
Description=FuturePorn Our Worker
 | 
			
		||||
After=network.target
 | 
			
		||||
 | 
			
		||||
[Service]
 | 
			
		||||
Type=simple
 | 
			
		||||
WorkingDirectory={{ app_dir }}
 | 
			
		||||
ExecStart=/usr/bin/env /usr/bin/npx tsx src/worker.ts
 | 
			
		||||
Restart=on-failure
 | 
			
		||||
User={{ app_user }}
 | 
			
		||||
EnvironmentFile={{ env_file }}
 | 
			
		||||
 | 
			
		||||
[Install]
 | 
			
		||||
WantedBy=multi-user.target
 | 
			
		||||
7  ansible/roles/our-worker/vars/main.yml  Normal file
							@ -0,0 +1,7 @@
 | 
			
		||||
---
 | 
			
		||||
app_user: futureporn
 | 
			
		||||
app_dir: /opt/futureporn/services/our
 | 
			
		||||
app_entry: src/worker.ts
 | 
			
		||||
env_file: /usr/local/etc/futureporn/our/env
 | 
			
		||||
nodejs_version: "20.x"
 | 
			
		||||
our_commit: main
 | 
			
		||||
218  ansible/roles/our/files/green-blue-example.sh  Normal file
							@ -0,0 +1,218 @@
 | 
			
		||||
#!/bin/bash
 | 
			
		||||
 | 
			
		||||
set -e  # Stop script execution on error
 | 
			
		||||
 | 
			
		||||
NGINX_CONF_PATH="./docker/nginx/active_backend.conf"
 | 
			
		||||
NGINX_CONTAINER="app"
 | 
			
		||||
ENV_FILE=".env"
 | 
			
		||||
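# Flow overview (added summary): build images, read the active color from .env, bring up the
# opposite color, wait for its health checks, run composer/artisan setup, point nginx at the
# new color, then stop and remove the old one; rollback() restores the previous CONTAINER_COLOR.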
 | 
			
		||||
build_containers() {
 | 
			
		||||
    echo "📦 Building Docker containers..."
 | 
			
		||||
    docker compose build
 | 
			
		||||
    echo "✅ Docker containers built successfully."
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
prepare_nginx_config() {
 | 
			
		||||
    if [ ! -d "./docker/nginx" ]; then
 | 
			
		||||
        echo "📂 Nginx directory not found. Creating it..."
 | 
			
		||||
        mkdir -p ./docker/nginx
 | 
			
		||||
        echo "✅ Nginx directory created."
 | 
			
		||||
    fi
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
update_nginx_config() {
 | 
			
		||||
    local active_color=$1
 | 
			
		||||
    echo "🔄 Updating Nginx configuration to route traffic to '$active_color' containers..."
 | 
			
		||||
 | 
			
		||||
    cat > "$NGINX_CONF_PATH" <<EOL
 | 
			
		||||
upstream app_backend {
 | 
			
		||||
    server $active_color:9000 max_fails=3 fail_timeout=30s;
 | 
			
		||||
}
 | 
			
		||||
EOL
 | 
			
		||||
 | 
			
		||||
    echo "📋 Copying Nginx configuration to the container..."
 | 
			
		||||
    docker cp "$NGINX_CONF_PATH" "$NGINX_CONTAINER:/etc/nginx/conf.d/active_backend.conf"
 | 
			
		||||
    echo "🔁 Reloading Nginx to apply the new configuration..."
 | 
			
		||||
    docker exec "$NGINX_CONTAINER" nginx -s reload >/dev/null 2>&1
 | 
			
		||||
    echo "✅ Nginx configuration updated and reloaded successfully."
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
wait_for_health() {
 | 
			
		||||
    local container_prefix=$1
 | 
			
		||||
    local retries=5
 | 
			
		||||
    local unhealthy_found
 | 
			
		||||
    echo "⏳ Waiting for containers with prefix '$container_prefix' to become healthy..."
 | 
			
		||||
 | 
			
		||||
    while (( retries > 0 )); do
 | 
			
		||||
        unhealthy_found=false
 | 
			
		||||
 | 
			
		||||
        for container_name in $(docker ps --filter "name=$container_prefix" --format "{{.Names}}"); do
 | 
			
		||||
            health_status=$(docker inspect --format '{{if .State.Health}}{{.State.Health.Status}}{{else}}unknown{{end}}' "$container_name" || echo "unknown")
 | 
			
		||||
            if [[ "$health_status" != "healthy" ]]; then
 | 
			
		||||
                unhealthy_found=true
 | 
			
		||||
                echo "🚧 Container '$container_name' is not ready. Current status: $health_status."
 | 
			
		||||
            fi
 | 
			
		||||
        done
 | 
			
		||||
 | 
			
		||||
        if ! $unhealthy_found; then
 | 
			
		||||
            echo "✅ All containers with prefix '$container_prefix' are healthy."
 | 
			
		||||
            return 0
 | 
			
		||||
        fi
 | 
			
		||||
 | 
			
		||||
        echo "⏳ Retrying... ($retries retries left)"
 | 
			
		||||
        ((retries--))
 | 
			
		||||
        sleep 5
 | 
			
		||||
    done
 | 
			
		||||
 | 
			
		||||
    echo "❌ Error: Some containers with prefix '$container_prefix' are not healthy. Aborting deployment."
 | 
			
		||||
    rollback
 | 
			
		||||
    exit 1  # surface the failed deployment to the caller instead of reporting success
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
rollback() {
 | 
			
		||||
    echo "🛑 Rolling back deployment. Ensuring the active environment remains intact."
 | 
			
		||||
 | 
			
		||||
    if [ -n "$PREVIOUS_COLOR" ]; then
 | 
			
		||||
        echo "🔄 Restoring CONTAINER_COLOR=$PREVIOUS_COLOR in .env."
 | 
			
		||||
        sed -i.bak "s/^CONTAINER_COLOR=.*/CONTAINER_COLOR=$PREVIOUS_COLOR/" "$ENV_FILE"
 | 
			
		||||
        rm -f "$ENV_FILE.bak"
 | 
			
		||||
        echo "✅ Restored CONTAINER_COLOR=$PREVIOUS_COLOR in .env."
 | 
			
		||||
    else
 | 
			
		||||
        echo "🚧  No previous CONTAINER_COLOR found to restore."
 | 
			
		||||
    fi
 | 
			
		||||
 | 
			
		||||
    if docker ps --filter "name=green" --format "{{.Names}}" | grep -q "green"; then
 | 
			
		||||
        echo "✅ Active environment 'green' remains intact."
 | 
			
		||||
        echo "🛑 Stopping and removing 'blue' containers..."
 | 
			
		||||
        docker compose stop "blue" >/dev/null 2>&1 || true
 | 
			
		||||
        docker compose rm -f "blue" >/dev/null 2>&1 || true
 | 
			
		||||
    elif docker ps --filter "name=blue" --format "{{.Names}}" | grep -q "blue"; then
 | 
			
		||||
        echo "✅ Active environment 'blue' remains intact."
 | 
			
		||||
        echo "🛑 Stopping and removing 'green' containers..."
 | 
			
		||||
        docker compose stop "green" >/dev/null 2>&1 || true
 | 
			
		||||
        docker compose rm -f "green" >/dev/null 2>&1 || true
 | 
			
		||||
    else
 | 
			
		||||
        echo "❌ No active environment detected after rollback. Manual intervention might be needed."
 | 
			
		||||
    fi
 | 
			
		||||
 | 
			
		||||
    echo "🔄 Rollback completed."
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
update_env_file() {
 | 
			
		||||
    local active_color=$1
 | 
			
		||||
 | 
			
		||||
    # check if .env file exists
 | 
			
		||||
    if [ ! -f "$ENV_FILE" ]; then
 | 
			
		||||
        echo "❌ .env file not found. Creating a new one..."
 | 
			
		||||
        echo "CONTAINER_COLOR=$active_color" > "$ENV_FILE"
 | 
			
		||||
        echo "✅ Created .env file with CONTAINER_COLOR=$active_color."
 | 
			
		||||
        return
 | 
			
		||||
    fi
 | 
			
		||||
 | 
			
		||||
    # backup previous CONTAINER_COLOR value
 | 
			
		||||
    if grep -q "^CONTAINER_COLOR=" "$ENV_FILE"; then
 | 
			
		||||
        PREVIOUS_COLOR=$(grep "^CONTAINER_COLOR=" "$ENV_FILE" | cut -d '=' -f 2)
 | 
			
		||||
        echo "♻️  Backing up previous CONTAINER_COLOR=$PREVIOUS_COLOR."
 | 
			
		||||
    else
 | 
			
		||||
        PREVIOUS_COLOR=""
 | 
			
		||||
    fi
 | 
			
		||||
 | 
			
		||||
    # update CONTAINER_COLOR value in .env
 | 
			
		||||
    if grep -q "^CONTAINER_COLOR=" "$ENV_FILE"; then
 | 
			
		||||
        sed -i.bak "s/^CONTAINER_COLOR=.*/CONTAINER_COLOR=$active_color/" "$ENV_FILE"
 | 
			
		||||
        echo "🔄 Updated CONTAINER_COLOR=$active_color in .env"
 | 
			
		||||
    else
 | 
			
		||||
        echo "CONTAINER_COLOR=$active_color" >> "$ENV_FILE"
 | 
			
		||||
        echo "🖋️ Added CONTAINER_COLOR=$active_color to .env"
 | 
			
		||||
    fi
 | 
			
		||||
 | 
			
		||||
    # remove backup file
 | 
			
		||||
    if [ -f "$ENV_FILE.bak" ]; then
 | 
			
		||||
        rm "$ENV_FILE.bak"
 | 
			
		||||
    fi
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
install_dependencies() {
 | 
			
		||||
    local container=$1
 | 
			
		||||
    echo "📥 Installing dependencies in container '$container'..."
 | 
			
		||||
 | 
			
		||||
    # Install Laravel dependencies
 | 
			
		||||
    docker exec -u root -it "$container" bash -c "composer install --no-dev --optimize-autoloader"
 | 
			
		||||
    docker exec -u root -it "$container" bash -c "mkdir -p database && touch database/database.sqlite"
 | 
			
		||||
 | 
			
		||||
    # Permissions setup
 | 
			
		||||
    docker exec -u root -it "$container" bash -c "chown www-data:www-data -R ./storage ./bootstrap ./database"
 | 
			
		||||
    docker exec -u root -it "$container" bash -c "chmod -R 775 ./storage ./bootstrap/cache"
 | 
			
		||||
 | 
			
		||||
    # Clear caches and run migrations
 | 
			
		||||
    docker exec -u root -it "$container" bash -c "php artisan cache:clear"
 | 
			
		||||
    docker exec -u root -it "$container" bash -c "php artisan config:clear"
 | 
			
		||||
    docker exec -u root -it "$container" bash -c "php artisan route:clear"
 | 
			
		||||
    docker exec -u root -it "$container" bash -c "php artisan view:clear"
 | 
			
		||||
    docker exec -u root -it "$container" bash -c "php artisan migrate --force"
 | 
			
		||||
 | 
			
		||||
    echo "✅ Dependencies installed and database initialized successfully in container '$container'."
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
deploy() {
 | 
			
		||||
    local active=$1
 | 
			
		||||
    local new=$2
 | 
			
		||||
 | 
			
		||||
    # Update .env before deploying
 | 
			
		||||
    update_env_file "$new"
 | 
			
		||||
    echo "🚀 Starting deployment. Current active environment: '$active'. Deploying to '$new'..."
 | 
			
		||||
    docker compose --profile "$new" up -d
 | 
			
		||||
    wait_for_health "$new"
 | 
			
		||||
    install_dependencies "$new"
 | 
			
		||||
    update_nginx_config "$new"
 | 
			
		||||
    echo "🗑️  Removing old environment: '$active'..."
 | 
			
		||||
    echo "🛑 Stopping '$active' containers..."
 | 
			
		||||
    docker compose stop $active >/dev/null 2>&1 || true
 | 
			
		||||
    echo "🗑️  Removing '$active' containers..."
 | 
			
		||||
    docker compose rm -f $active >/dev/null 2>&1 || true
 | 
			
		||||
    update_env_file "$new"
 | 
			
		||||
    echo "✅ Deployment to '$new' completed successfully."
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
get_active_container() {
 | 
			
		||||
    if [ -f "$ENV_FILE" ] && grep -q "CONTAINER_COLOR" "$ENV_FILE"; then
 | 
			
		||||
        grep "CONTAINER_COLOR" "$ENV_FILE" | cut -d '=' -f 2
 | 
			
		||||
    else
 | 
			
		||||
        echo ""
 | 
			
		||||
    fi
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
# Main script logic
 | 
			
		||||
prepare_nginx_config
 | 
			
		||||
build_containers
 | 
			
		||||
 | 
			
		||||
ACTIVE_COLOR=$(get_active_container)
 | 
			
		||||
 | 
			
		||||
if [ -z "$ACTIVE_COLOR" ]; then
 | 
			
		||||
    # if no active container found, deploy 'blue'
 | 
			
		||||
    echo "🟦 Initial setup. Bringing up 'blue' containers..."
 | 
			
		||||
    docker compose --profile blue up -d
 | 
			
		||||
    wait_for_health "blue"
 | 
			
		||||
    install_dependencies "blue"
 | 
			
		||||
    update_nginx_config "blue"
 | 
			
		||||
    update_env_file "blue"
 | 
			
		||||
elif [ "$ACTIVE_COLOR" == "green" ]; then
 | 
			
		||||
    # if the active is 'green', deploy 'blue'
 | 
			
		||||
    PREVIOUS_COLOR="green"
 | 
			
		||||
    deploy "green" "blue"
 | 
			
		||||
elif [ "$ACTIVE_COLOR" == "blue" ]; then
 | 
			
		||||
    # if the active is 'blue', deploy 'green'
 | 
			
		||||
    PREVIOUS_COLOR="blue"
 | 
			
		||||
    deploy "blue" "green"
 | 
			
		||||
else
 | 
			
		||||
    # if the active is neither 'green' nor 'blue', reset to 'blue'
 | 
			
		||||
    echo "🚧 Unexpected CONTAINER_COLOR value. Resetting to 'blue'..."
 | 
			
		||||
    PREVIOUS_COLOR=""
 | 
			
		||||
    docker compose --profile blue up -d
 | 
			
		||||
    wait_for_health "blue"
 | 
			
		||||
    install_dependencies "blue"
 | 
			
		||||
    update_nginx_config "blue"
 | 
			
		||||
    update_env_file "blue"
 | 
			
		||||
fi
 | 
			
		||||
 | 
			
		||||
echo "🎉 Deployment successful!"
 | 
			
		||||
8  ansible/roles/our/handlers/main.yml  Normal file
							@ -0,0 +1,8 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Mount vfs
 | 
			
		||||
  ansible.posix.mount:
 | 
			
		||||
    src: "{{ vfs_mount_tag }}"
 | 
			
		||||
    path: /mnt/vfs
 | 
			
		||||
    fstype: virtiofs
 | 
			
		||||
    state: mounted
 | 
			
		||||
60  ansible/roles/our/tasks/deploy.yml  Normal file
							@ -0,0 +1,60 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Create directory
 | 
			
		||||
  ansible.builtin.file:
 | 
			
		||||
    path: /etc/futureporn/our
 | 
			
		||||
    state: directory
 | 
			
		||||
    mode: "0755"
 | 
			
		||||
 | 
			
		||||
- name: Copy env file
 | 
			
		||||
  ansible.builtin.copy:
 | 
			
		||||
    src: ../../../../.env.production
 | 
			
		||||
    dest: /etc/futureporn/our/.env
 | 
			
		||||
    mode: "0600"
 | 
			
		||||
 | 
			
		||||
- name: Clone the latest code
 | 
			
		||||
  ansible.builtin.git:
 | 
			
		||||
    repo: https://gitea.futureporn.net/futureporn/fp
 | 
			
		||||
    dest: /tmp/checkout
 | 
			
		||||
    single_branch: true
 | 
			
		||||
    version: main
 | 
			
		||||
    clone: true
 | 
			
		||||
    force: true
 | 
			
		||||
 | 
			
		||||
- name: Copy the compose file
 | 
			
		||||
  ansible.builtin.copy:
 | 
			
		||||
    remote_src: true
 | 
			
		||||
    src: /tmp/checkout/services/our/compose.production.yaml
 | 
			
		||||
    dest: /etc/futureporn/our/compose.production.yaml
 | 
			
		||||
    mode: "0744"
 | 
			
		||||
 | 
			
		||||
- name: Deploy stack to green
 | 
			
		||||
  community.docker.docker_stack:
 | 
			
		||||
    state: present
 | 
			
		||||
    name: our-green
 | 
			
		||||
    compose:
 | 
			
		||||
      - /etc/futureporn/our/compose.production.yaml
 | 
			
		||||
      - services:
 | 
			
		||||
          server:
 | 
			
		||||
            ports:
 | 
			
		||||
              - target: 5000 # container port
 | 
			
		||||
                published: 8084 # Swarm ingress port
 | 
			
		||||
                protocol: tcp
 | 
			
		||||
                mode: ingress
 | 
			
		||||
 | 
			
		||||
- name: Deploy stack to blue
 | 
			
		||||
  community.docker.docker_stack:
 | 
			
		||||
    state: present
 | 
			
		||||
    name: our-blue
 | 
			
		||||
    compose:
 | 
			
		||||
      - /etc/futureporn/our/compose.production.yaml
 | 
			
		||||
      - services:
 | 
			
		||||
          server:
 | 
			
		||||
            ports:
 | 
			
		||||
              - target: 5000 # container port
 | 
			
		||||
                published: 8085 # Swarm ingress port
 | 
			
		||||
                protocol: tcp
 | 
			
		||||
                mode: ingress
 | 
			
		||||
# - name: Remove stack
 | 
			
		||||
#   community.docker.docker_stack:
 | 
			
		||||
#     name: mystack
 | 
			
		||||
#     state: absent
 | 
			
		||||
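# Note (editor's reading): both stacks stay deployed side by side — our-green publishes the
# server on Swarm ingress port 8084 and our-blue on 8085 — so a cutover is a matter of
# pointing the load balancer's upstream at the other published port.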
11  ansible/roles/our/tasks/filesystem.yml  Normal file
							@ -0,0 +1,11 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Get VFS mount tag
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    our_vfs_mount_tag: "{{ lookup('dotenv', 'VULTR_VFS_MOUNT_TAG', file='../../../../.env.production') }}"
 | 
			
		||||
 | 
			
		||||
- name: Mount VFS
 | 
			
		||||
  ansible.posix.mount:
 | 
			
		||||
    src: "{{ our_vfs_mount_tag }}"
 | 
			
		||||
    path: /mnt/vfs
 | 
			
		||||
    fstype: virtiofs
 | 
			
		||||
    state: mounted
 | 
			
		||||
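# Note: with ansible.posix.mount, state: mounted both mounts the virtiofs share immediately
# and persists the entry in /etc/fstab, so the VFS stays mounted across reboots.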
13  ansible/roles/our/tasks/main.yml  Normal file
							@ -0,0 +1,13 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Configure filesystem
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: filesystem.yml
 | 
			
		||||
 | 
			
		||||
- name: Configure docker stack
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: stack.yml
 | 
			
		||||
 | 
			
		||||
- name: Deploy our
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: deploy.yml
 | 
			
		||||
							
								
								
									
ansible/roles/our/tasks/old.yml  (new file, 46 lines)
@@ -0,0 +1,46 @@
 | 
			
		||||
---
 | 
			
		||||
# The Terraform Vultr provider doesn't expose the mount_tag.
 | 
			
		||||
# It does, however, expose the VFS id, which we save to Ansible host vars at `tofu apply` time.
 | 
			
		||||
# As a workaround, we use the Vultr API to fetch the mount_tag and mount the VFS on the instance.
 | 
			
		||||
- name: Get the VFS data
 | 
			
		||||
  ansible.builtin.uri:
 | 
			
		||||
    url: https://api.vultr.com/v2/vfs
 | 
			
		||||
    method: GET
 | 
			
		||||
    status_code: 200
 | 
			
		||||
    headers:
 | 
			
		||||
      Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
 | 
			
		||||
  register: vfs_list
 | 
			
		||||
 | 
			
		||||
- name: Get VFS variables
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    our_vfs_id: "{{ vfs_list.json.vfs | selectattr('tags', 'contains', 'our') | map(attribute='id') | first }}"
 | 
			
		||||
 | 
			
		||||
- name: Debug the our VFS id
 | 
			
		||||
  ansible.builtin.debug:
 | 
			
		||||
    msg: "The VFS ID for 'our' is {{ our_vfs_id }}"
 | 
			
		||||
 | 
			
		||||
- name: Attach VFS to Vultr instance
 | 
			
		||||
  ansible.builtin.uri:
 | 
			
		||||
    url: https://api.vultr.com/v2/vfs/{{ vultr_vfs_storage_id }}/attachments/{{ hostvars[inventory_hostname]['vultr_instance_id'] }}
 | 
			
		||||
    method: PUT
 | 
			
		||||
    status_code:
 | 
			
		||||
      - 200
 | 
			
		||||
      - 201
 | 
			
		||||
      - 409
 | 
			
		||||
    headers:
 | 
			
		||||
      Authorization: "Bearer {{ lookup('dotenv', 'VULTR_API_KEY', file='../.env') }}"
 | 
			
		||||
  register: vfs_attach
 | 
			
		||||
  changed_when:
 | 
			
		||||
    - vfs_attach.json is defined
 | 
			
		||||
    - "'state' in vfs_attach.json"
 | 
			
		||||
    - vfs_attach.json.state == "ATTACHED"
 | 
			
		||||
  notify:
 | 
			
		||||
    - Mount vfs
 | 
			
		||||
 | 
			
		||||
- name: Debug vfs_attach
 | 
			
		||||
  ansible.builtin.debug:
 | 
			
		||||
    var: vfs_attach
 | 
			
		||||
 | 
			
		||||
- name: Get the VFS mount_tag
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    vfs_mount_tag: "{{ vfs_attach.json.mount_tag | default('') }}"
 | 
			
		||||
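The attach task above notifies a `Mount vfs` handler that is not part of this diff. A minimal sketch of what such a handler could look like, reusing the `vfs_mount_tag` fact set in the last task and the same mount parameters as filesystem.yml, is:

- name: Mount vfs # hypothetical handler body; the real handler is not shown in this commit
  ansible.posix.mount:
    src: "{{ vfs_mount_tag }}"
    path: /mnt/vfs
    fstype: virtiofs
    state: mounted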
							
								
								
									
ansible/roles/our/tasks/stack.yml  (new file, 1 line)
@@ -0,0 +1 @@
 | 
			
		||||
---
 | 
			
		||||
							
								
								
									
ansible/roles/swarm/defaults/main.yml  (new file, 3 lines)
@@ -0,0 +1,3 @@
 | 
			
		||||
---
 | 
			
		||||
swarm_enable_manager: false
 | 
			
		||||
swarm_enable_worker: false
 | 
			
		||||
							
								
								
									
ansible/roles/swarm/tasks/main.yml  (new file, 11 lines)
@@ -0,0 +1,11 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Configure swarm manager
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: manager.yml
 | 
			
		||||
  when: swarm_enable_manager
 | 
			
		||||
 | 
			
		||||
- name: Configure swarm worker
 | 
			
		||||
  ansible.builtin.include_tasks:
 | 
			
		||||
    file: worker.yml
 | 
			
		||||
  when: swarm_enable_worker
 | 
			
		||||
							
								
								
									
ansible/roles/swarm/tasks/manager.yml  (new file, 24 lines)
@@ -0,0 +1,24 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Init a new swarm with default parameters
 | 
			
		||||
  community.docker.docker_swarm:
 | 
			
		||||
    state: present
 | 
			
		||||
    listen_addr: "{{ internal_ip }}:2377"
 | 
			
		||||
    advertise_addr: "{{ internal_ip }}:4567"
 | 
			
		||||
  register: swarm_create
 | 
			
		||||
 | 
			
		||||
- name: Set join tokens as host facts (manager only)
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    swarm_worker_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Worker }}"
 | 
			
		||||
    swarm_manager_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Manager }}"
 | 
			
		||||
 | 
			
		||||
- name: Debug
 | 
			
		||||
  ansible.builtin.debug:
 | 
			
		||||
    var: swarm_create
 | 
			
		||||
 | 
			
		||||
- name: Get worker join token
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    swarm_worker_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Worker }}"
 | 
			
		||||
 | 
			
		||||
- name: Get manager join token
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    swarm_manager_join_token: "{{ swarm_create.swarm_facts.JoinTokens.Manager }}"
 | 
			
		||||
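If the swarm already exists and you want to re-read the tokens without relying on the creation result, a hedged alternative is to query the running swarm directly; this assumes `community.docker.docker_swarm_info` exposes `JoinTokens` the same way `docker swarm inspect` does:

- name: Read join tokens from the running swarm # hedged alternative, not part of the committed role
  community.docker.docker_swarm_info:
  register: swarm_info

- name: Store join tokens as facts
  ansible.builtin.set_fact:
    swarm_worker_join_token: "{{ swarm_info.swarm_facts.JoinTokens.Worker }}"
    swarm_manager_join_token: "{{ swarm_info.swarm_facts.JoinTokens.Manager }}"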
							
								
								
									
ansible/roles/swarm/tasks/worker.yml  (new file, 19 lines)
@@ -0,0 +1,19 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Show swarm-node group membership
  ansible.builtin.debug:
 | 
			
		||||
    var: groups['swarm-node']
 | 
			
		||||
 | 
			
		||||
- name: Get all swarm nodes except the first one
 | 
			
		||||
  ansible.builtin.set_fact:
 | 
			
		||||
    swarm_worker_ips: "{{ groups['swarm-node'][1:] }}"
 | 
			
		||||
 | 
			
		||||
- name: Join worker nodes
 | 
			
		||||
  community.docker.docker_swarm:
 | 
			
		||||
    state: join
 | 
			
		||||
    advertise_addr: "{{ internal_ip }}:4567"
 | 
			
		||||
    join_token: "{{ hostvars[groups['swarm-node'] | first]['swarm_worker_join_token'] }}"
 | 
			
		||||
    remote_addrs: ["{{ hostvars[groups['swarm-node'] | first]['internal_ip'] }}:2377"] # join via the manager's listen address (see manager.yml); the bare fact name here was never templated
 | 
			
		||||
# - name: Join swarm as worker
 | 
			
		||||
#     community.docker.docker_swarm:
 | 
			
		||||
#       state: joined
 | 
			
		||||
#       join_token: "{{ hostvars[groups['swarm-node'] | first].swarm_worker_join_token }}"
 | 
			
		||||
#       remote_addrs: ["{{ hostvars[groups['swarm-node'] | first].internal_ip }}:2377"]
 | 
			
		||||
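The join task reads the token from the first `swarm-node` host, so it only works when the manager tasks have already run in the same invocation and set that fact. An optional guard, sketched here, makes that failure mode explicit instead of surfacing as an undefined-variable error:

- name: Ensure the manager join token is available # optional guard; assumes the manager tasks ran earlier in this run
  ansible.builtin.assert:
    that:
      - hostvars[groups['swarm-node'] | first]['swarm_worker_join_token'] is defined
    fail_msg: Run the swarm manager tasks before joining workers.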
							
								
								
									
ansible/roles/tailscale/main.yml  (new file, 21 lines)
@@ -0,0 +1,21 @@
 | 
			
		||||
 | 
			
		||||
- name: Add Tailscale's GPG key
 | 
			
		||||
  ansible.builtin.get_url:
 | 
			
		||||
    url: https://pkgs.tailscale.com/stable/ubuntu/noble.noarmor.gpg
 | 
			
		||||
    dest: /usr/share/keyrings/tailscale-archive-keyring.gpg
 | 
			
		||||
    mode: '0644'
 | 
			
		||||
 | 
			
		||||
- name: Add Tailscale apt repository
 | 
			
		||||
  ansible.builtin.get_url:
 | 
			
		||||
    url: https://pkgs.tailscale.com/stable/ubuntu/noble.tailscale-keyring.list
 | 
			
		||||
    dest: /etc/apt/sources.list.d/tailscale.list
 | 
			
		||||
    mode: '0644'
 | 
			
		||||
 | 
			
		||||
- name: Update apt cache
 | 
			
		||||
  ansible.builtin.apt:
 | 
			
		||||
    update_cache: yes
 | 
			
		||||
 | 
			
		||||
- name: Install tailscale
 | 
			
		||||
  ansible.builtin.apt:
 | 
			
		||||
    name: tailscale
 | 
			
		||||
    state: present
 | 
			
		||||
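The role installs the package but never brings the node onto the tailnet. If that is wanted, a follow-up task along these lines could do it; `TAILSCALE_AUTHKEY` is an assumed `.env.production` entry, not something defined in this commit, and idempotence handling is omitted:

- name: Bring the node up on the tailnet # hypothetical follow-up; the auth key variable and dotenv path are assumptions
  ansible.builtin.command: >
    tailscale up
    --authkey={{ lookup('dotenv', 'TAILSCALE_AUTHKEY', file='../.env.production') }}
  changed_when: true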
							
								
								
									
ansible/roles/uppy/tasks/caddy.yml  (new file, 20 lines)
@@ -0,0 +1,20 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Configure firewall
 | 
			
		||||
  community.general.ufw:
 | 
			
		||||
    rule: allow
 | 
			
		||||
    port: "{{ item }}"
 | 
			
		||||
    proto: tcp
 | 
			
		||||
  loop:
 | 
			
		||||
    - 80
 | 
			
		||||
    - 443
 | 
			
		||||
 | 
			
		||||
- name: Install Caddy
 | 
			
		||||
  ansible.builtin.import_role:
 | 
			
		||||
    name: nvjacobo.caddy
 | 
			
		||||
 | 
			
		||||
- name: Configure Caddyfile
 | 
			
		||||
  ansible.builtin.template:
 | 
			
		||||
    src: "templates/Caddyfile.j2"
 | 
			
		||||
    dest: /etc/caddy/Caddyfile
 | 
			
		||||
    mode: "0644"
 | 
			
		||||
  notify: restart caddy # nvjacobo.caddy handles this
 | 
			
		||||
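The template change only takes effect once Caddy is running and the notified handler has fired. An optional final task, sketched below, asserts the service state directly; it assumes the systemd unit installed by nvjacobo.caddy is named `caddy`:

- name: Ensure Caddy is enabled and running # optional follow-up; the unit name "caddy" is an assumption
  ansible.builtin.service:
    name: caddy
    state: started
    enabled: true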
							
								
								
									
ansible/roles/uppy/tasks/companion.yml  (new file, 44 lines)
@@ -0,0 +1,44 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Setup volume
 | 
			
		||||
  community.docker.docker_volume:
 | 
			
		||||
    name: uppy_data # matches the volume mounted by the companion container below
 | 
			
		||||
 | 
			
		||||
- name: Setup docker container
 | 
			
		||||
  community.docker.docker_container:
 | 
			
		||||
    name: uppy-companion
 | 
			
		||||
    image: transloadit/companion
 | 
			
		||||
    pull: missing
 | 
			
		||||
    state: started
 | 
			
		||||
    ports:
 | 
			
		||||
      - "3020:3020"
 | 
			
		||||
    env:
 | 
			
		||||
      NODE_ENV: prod
 | 
			
		||||
      COMPANION_PORT: "{{ lookup('dotenv', 'COMPANION_PORT', file='../.env') }}"
 | 
			
		||||
      COMPANION_DOMAIN: "{{ lookup('dotenv', 'COMPANION_DOMAIN', file='../.env') }}"
 | 
			
		||||
      COMPANION_SELF_ENDPOINT: "{{ lookup('dotenv', 'COMPANION_SELF_ENDPOINT', file='../.env') }}"
 | 
			
		||||
      COMPANION_HIDE_METRICS: "{{ lookup('dotenv', 'COMPANION_HIDE_METRICS', file='../.env') }}"
 | 
			
		||||
      COMPANION_HIDE_WELCOME: "{{ lookup('dotenv', 'COMPANION_HIDE_WELCOME', file='../.env') }}"
 | 
			
		||||
      COMPANION_STREAMING_UPLOAD: "{{ lookup('dotenv', 'COMPANION_STREAMING_UPLOAD', file='../.env') }}"
 | 
			
		||||
      COMPANION_TUS_DEFERRED_UPLOAD_LENGTH: "{{ lookup('dotenv', 'COMPANION_TUS_DEFERRED_UPLOAD_LENGTH', file='../.env') }}"
 | 
			
		||||
      COMPANION_CLIENT_ORIGINS: "{{ lookup('dotenv', 'COMPANION_CLIENT_ORIGINS', file='../.env') }}"
 | 
			
		||||
      COMPANION_PROTOCOL: "{{ lookup('dotenv', 'COMPANION_PROTOCOL', file='../.env') }}"
 | 
			
		||||
      COMPANION_DATADIR: /mnt/uppy-server-data
 | 
			
		||||
      COMPANION_SECRET: "{{ lookup('dotenv', 'COMPANION_SECRET', file='../.env') }}"
 | 
			
		||||
      COMPANION_PREAUTH_SECRET: "{{ lookup('dotenv', 'COMPANION_PREAUTH_SECRET', file='../.env') }}"
 | 
			
		||||
      COMPANION_AWS_KEY: "{{ lookup('dotenv', 'COMPANION_AWS_KEY', file='../.env') }}"
 | 
			
		||||
      COMPANION_AWS_SECRET: "{{ lookup('dotenv', 'COMPANION_AWS_SECRET', file='../.env') }}"
 | 
			
		||||
      COMPANION_AWS_BUCKET: "{{ lookup('dotenv', 'COMPANION_AWS_BUCKET', file='../.env') }}"
 | 
			
		||||
      COMPANION_AWS_ENDPOINT: "{{ lookup('dotenv', 'COMPANION_AWS_ENDPOINT', file='../.env') }}"
 | 
			
		||||
      COMPANION_AWS_REGION: "{{ lookup('dotenv', 'COMPANION_AWS_REGION', file='../.env') }}"
 | 
			
		||||
      COMPANION_AWS_FORCE_PATH_STYLE: "false"
 | 
			
		||||
      COMPANION_AWS_PREFIX: usc/
 | 
			
		||||
    mounts:
 | 
			
		||||
      - type: volume
 | 
			
		||||
        target: "/mnt/uppy-server-data"
 | 
			
		||||
        source: "uppy_data"
 | 
			
		||||
# - name: Allow VPC2.0 network access
 | 
			
		||||
#   community.general.ufw:
 | 
			
		||||
#     rule: allow
 | 
			
		||||
#     port: '5432'
 | 
			
		||||
#     proto: tcp
 | 
			
		||||
#     from: 10.2.128.0/20
 | 
			
		||||
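Every COMPANION_* value is pulled from `../.env` at run time, so a missing key silently becomes an empty environment variable inside the container. An optional pre-check, sketched for two of the values Companion cannot usefully run without, would fail fast instead:

- name: Assert required Companion settings are present # optional pre-check, not part of the committed role
  ansible.builtin.assert:
    that:
      - lookup('dotenv', 'COMPANION_SECRET', file='../.env') | length > 0
      - lookup('dotenv', 'COMPANION_DOMAIN', file='../.env') | length > 0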
							
								
								
									
ansible/roles/uppy/templates/Caddyfile.j2  (new file, 15 lines)
@@ -0,0 +1,15 @@
 | 
			
		||||
uppy.futureporn.net {
 | 
			
		||||
 | 
			
		||||
	# Define the upstream servers for load balancing
 | 
			
		||||
	reverse_proxy :3020 {
 | 
			
		||||
 | 
			
		||||
		# Health checks (optional)
 | 
			
		||||
		health_uri /metrics
 | 
			
		||||
		health_interval 10s
 | 
			
		||||
		health_timeout 5s
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	handle_errors {
 | 
			
		||||
		respond "💥 Error ~ {err.status_code} {err.status_text}"
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
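Because the template proxies to `:3020` and health-checks `/metrics`, a bad render only shows up when Caddy reloads. A small validation step after templating, sketched below, catches syntax problems before the restart handler fires; it assumes the `caddy` binary installed by nvjacobo.caddy is on PATH:

- name: Validate the rendered Caddyfile # optional check; assumes the caddy binary is on PATH
  ansible.builtin.command: caddy validate --config /etc/caddy/Caddyfile
  changed_when: false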
							
								
								
									
ansible/site.yml  (122 changed lines)
@@ -1,7 +1,7 @@
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
- name: Bootstrap
 | 
			
		||||
  hosts: our
 | 
			
		||||
  hosts: all
 | 
			
		||||
  gather_facts: false ## required because ansible_host may not have python
 | 
			
		||||
  check_mode: false
 | 
			
		||||
  become: false
 | 
			
		||||
@ -9,114 +9,82 @@
 | 
			
		||||
    - bootstrap
 | 
			
		||||
 | 
			
		||||
- name: Assert common dependencies
 | 
			
		||||
  hosts: our
 | 
			
		||||
  hosts: swarm-node
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  check_mode: false
 | 
			
		||||
  become: true
 | 
			
		||||
  roles:
 | 
			
		||||
    - common
 | 
			
		||||
    - docker
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# the decision of worker vs manager is set in ansible inventory by opentofu
 | 
			
		||||
- name: Set up docker swarm
 | 
			
		||||
  hosts: swarm-node
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  roles:
 | 
			
		||||
    - swarm
 | 
			
		||||
 | 
			
		||||
- name: Assert our dependencies
 | 
			
		||||
  hosts: our
 | 
			
		||||
  hosts: swarm-node
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  check_mode: false
 | 
			
		||||
  become: true
 | 
			
		||||
  roles:
 | 
			
		||||
    - our
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- name: Install Komodo Service
 | 
			
		||||
  hosts: our
 | 
			
		||||
  vars_files:
 | 
			
		||||
    - vars/main.yml
 | 
			
		||||
  vars:
 | 
			
		||||
    komodo_allowed_ips: "[\"::ffff:{{ lookup('dotenv', 'KOMODO_ALLOWED_IPS', file='../.env.production') }}\"]"
 | 
			
		||||
 | 
			
		||||
  roles:
 | 
			
		||||
    - role: bpbradley.komodo
 | 
			
		||||
      komodo_action: "install"
 | 
			
		||||
      komodo_version: "latest"
 | 
			
		||||
      komodo_passkeys: "{{ lookup('dotenv', 'KOMODO_PASSKEYS', file='../.env.production') }}"
 | 
			
		||||
      komodo_core_url: "{{ lookup('dotenv', 'KOMODO_CORE_URL', file='../.env.production') }}"
 | 
			
		||||
      komodo_core_api_key: "{{ lookup('dotenv', 'KOMODO_CORE_API_KEY', file='../.env.production') }}"
 | 
			
		||||
      komodo_core_api_secret: "{{ lookup('dotenv', 'KOMODO_CORE_API_SECRET', file='../.env.production') }}"
 | 
			
		||||
      enable_server_management: true
 | 
			
		||||
  tasks:
 | 
			
		||||
    - name: debug
 | 
			
		||||
      ansible.builtin.debug:
 | 
			
		||||
        msg:
 | 
			
		||||
          komodo_allowed_ips: "{{ komodo_allowed_ips }}"
 | 
			
		||||
    - name: Allow port 8120 TCP from Komodo core
 | 
			
		||||
      community.general.ufw:
 | 
			
		||||
        rule: allow
 | 
			
		||||
        port: "8120"
 | 
			
		||||
        proto: tcp
 | 
			
		||||
        from_ip: "{{ lookup('dotenv', 'KOMODO_ALLOWED_IPS', file='../.env.production') }}"
 | 
			
		||||
 | 
			
		||||
- name: Configure database
 | 
			
		||||
  hosts: database
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  check_mode: false
 | 
			
		||||
  become: false
 | 
			
		||||
  roles:
 | 
			
		||||
    - database
 | 
			
		||||
  tags:
 | 
			
		||||
    - database
 | 
			
		||||
 | 
			
		||||
- name: Configure Our Server
 | 
			
		||||
  hosts: our-server
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  check_mode: false
 | 
			
		||||
  become: false
 | 
			
		||||
  tags:
 | 
			
		||||
    - our-server
 | 
			
		||||
  roles:
 | 
			
		||||
    - geerlingguy.nodejs
 | 
			
		||||
    - our-server
 | 
			
		||||
 | 
			
		||||
- name: Configure Our Worker
 | 
			
		||||
  hosts: our-worker
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  check_mode: false
 | 
			
		||||
  become: false
 | 
			
		||||
  tags:
 | 
			
		||||
    - our-worker
 | 
			
		||||
  roles:
 | 
			
		||||
    - geerlingguy.nodejs
 | 
			
		||||
    - our-worker
 | 
			
		||||
 | 
			
		||||
- name: Configure load balancer
 | 
			
		||||
  hosts: load_balancer
 | 
			
		||||
  hosts: loadbalancer
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  check_mode: false
 | 
			
		||||
  become: false
 | 
			
		||||
  roles:
 | 
			
		||||
    - load_balancer
 | 
			
		||||
    - loadbalancer
 | 
			
		||||
  vars_files:
 | 
			
		||||
    - vars/main.yml
 | 
			
		||||
  tags:
 | 
			
		||||
    - lb
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- name: Set up b2-cli
 | 
			
		||||
  hosts: ipfs
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  roles:
 | 
			
		||||
    - backblaze
 | 
			
		||||
  tags:
 | 
			
		||||
    - b2
 | 
			
		||||
 | 
			
		||||
- name: Set up IPFS
 | 
			
		||||
  hosts: ipfs
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  vars:
 | 
			
		||||
    ipfs_enable_blockstorage: true
 | 
			
		||||
    ipfs_enable_kubo: true
 | 
			
		||||
    ipfs_enable_ipfs_cluster_service: false
 | 
			
		||||
    ipfs_enable_ipfs_cluster_follow: true
 | 
			
		||||
    ipfs_enable_b2_cli: true
 | 
			
		||||
  roles:
 | 
			
		||||
    - ipfs
 | 
			
		||||
  tags:
 | 
			
		||||
    - capture
 | 
			
		||||
    - ipfs
 | 
			
		||||
 | 
			
		||||
- name: Install Capture instance
 | 
			
		||||
  hosts: capture
 | 
			
		||||
  gather_facts: true
 | 
			
		||||
  check_mode: false
 | 
			
		||||
  become: false
 | 
			
		||||
  roles:
 | 
			
		||||
    - capture
 | 
			
		||||
  tags:
 | 
			
		||||
    - capture
 | 
			
		||||
 | 
			
		||||
# - name: Set up our app
 | 
			
		||||
#   hosts: swarm-node
 | 
			
		||||
#   gather_facts: true
 | 
			
		||||
#   roles:
 | 
			
		||||
#     - our
 | 
			
		||||
#   tags:
 | 
			
		||||
#     - our
 | 
			
		||||
 | 
			
		||||
# - name: Install Capture instance
 | 
			
		||||
#   hosts: capture
 | 
			
		||||
#   gather_facts: true
 | 
			
		||||
#   check_mode: false
 | 
			
		||||
#   become: false
 | 
			
		||||
#   roles:
 | 
			
		||||
#     - capture
 | 
			
		||||
#   tags:
 | 
			
		||||
#     - capture
 | 
			
		||||
 | 
			
		||||
# - name: Configure tracker
 | 
			
		||||
#   hosts: tracker
 | 
			
		||||
 | 
			
		||||
							
								
								
									
ansible/vars/main.yml  (new file, 28 lines)
@@ -0,0 +1,28 @@
 | 
			
		||||
---
 | 
			
		||||
# infisical_project_id: "{{ lookup('dotenv', 'INFISICAL_PROJECT_ID', file='../.env') }}"
 | 
			
		||||
# infisical_client_id: "{{ lookup('dotenv', 'INFISICAL_CLIENT_ID', file='../.env') }}"
 | 
			
		||||
# infisical_client_secret: "{{ lookup('dotenv', 'INFISICAL_CLIENT_SECRET', file='../.env') }}"
 | 
			
		||||
# infisical_url: "{{ lookup('dotenv', 'INFISICAL_URL', file='../.env' )}}"
 | 
			
		||||
# infisical_env_slug: prod
 | 
			
		||||
# infisical_secrets: >-
 | 
			
		||||
#   {{ 
 | 
			
		||||
#     lookup(
 | 
			
		||||
#       'infisical.vault.read_secrets',
 | 
			
		||||
#       universal_auth_client_id=infisical_client_id,
 | 
			
		||||
#       universal_auth_client_secret=infisical_client_secret,
 | 
			
		||||
#       project_id=infisical_project_id,
 | 
			
		||||
#       env_slug=infisical_env_slug,
 | 
			
		||||
#       path='/',
 | 
			
		||||
#       url=infisical_url,
 | 
			
		||||
#       wantlist=true
 | 
			
		||||
#     )
 | 
			
		||||
#     | ansible.builtin.items2dict
 | 
			
		||||
#   }}
 | 
			
		||||
 | 
			
		||||
s3_region: us-west-000
 | 
			
		||||
s3_endpoint: https://s3.us-west-000.backblazeb2.com
 | 
			
		||||
kubo_version: v0.34.1
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
our_server_port: 3000
 | 
			
		||||
@ -9,7 +9,7 @@ locals {
 | 
			
		||||
variable "ipfs_hosts" {
 | 
			
		||||
  description = "List of IP addresses for IPFS nodes"
 | 
			
		||||
  type        = list(string)
 | 
			
		||||
  default     = ["161.97.186.203", "38.242.193.246"]
 | 
			
		||||
  default     = ["38.242.193.246"]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -17,9 +17,6 @@ variable "our_port" {
 | 
			
		||||
  default = "5000"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
variable "database_host" {
 | 
			
		||||
  default = "10.2.128.4"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
variable "public_s3_endpoint" {
 | 
			
		||||
  default = "https://fp-usc.b-cdn.net"
 | 
			
		||||
@ -124,33 +121,52 @@ resource "bunnynet_dns_zone" "future_porn" {
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# load balancing instance
 | 
			
		||||
resource "vultr_instance" "load_balancer" {
 | 
			
		||||
# resource "vultr_instance" "loadbalancer" {
 | 
			
		||||
#   count           = 1
 | 
			
		||||
#   hostname        = "fp-lb-${count.index}"
 | 
			
		||||
#   plan            = "vc2-1c-2gb"
 | 
			
		||||
#   region          = "ord"
 | 
			
		||||
#   backups         = "disabled"
 | 
			
		||||
#   ddos_protection = "false"
 | 
			
		||||
#   os_id           = 1743
 | 
			
		||||
#   enable_ipv6     = true
 | 
			
		||||
#   label           = "fp lb ${count.index}"
 | 
			
		||||
#   tags            = ["futureporn", "loadbalancer", "our", "tofu"]
 | 
			
		||||
#   ssh_key_ids     = [local.envs.VULTR_SSH_KEY_ID]
 | 
			
		||||
#   user_data       = base64encode(var.vps_user_data)
 | 
			
		||||
#   vpc_ids = [
 | 
			
		||||
#     vultr_vpc.futureporn_vpc.id
 | 
			
		||||
#   ]
 | 
			
		||||
#   reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
# our0
 | 
			
		||||
resource "vultr_instance" "our_vps" {
 | 
			
		||||
  count           = 1
 | 
			
		||||
  hostname        = "fp-lb-${count.index}"
 | 
			
		||||
  plan            = "vc2-1c-2gb"
 | 
			
		||||
  hostname        = "fp-our-${count.index}"
 | 
			
		||||
  plan            = "vc2-4c-8gb"
 | 
			
		||||
  region          = "ord"
 | 
			
		||||
  backups         = "disabled"
 | 
			
		||||
  ddos_protection = "false"
 | 
			
		||||
  os_id           = 1743
 | 
			
		||||
  enable_ipv6     = true
 | 
			
		||||
  label           = "fp lb ${count.index}"
 | 
			
		||||
  tags            = ["futureporn", "load_balancer", "our"]
 | 
			
		||||
  label           = "fp our ${count.index}"
 | 
			
		||||
  tags            = ["futureporn", "our", "tofu"]
 | 
			
		||||
  ssh_key_ids     = [local.envs.VULTR_SSH_KEY_ID]
 | 
			
		||||
  user_data       = base64encode(var.vps_user_data)
 | 
			
		||||
  vpc_ids = [
 | 
			
		||||
    vultr_vpc.futureporn_vpc.id
 | 
			
		||||
  ]
 | 
			
		||||
  reserved_ip_id = vultr_reserved_ip.futureporn_v2_ip.id
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
resource "bunnynet_dns_record" "future_porn_apex" {
 | 
			
		||||
  zone  = bunnynet_dns_zone.future_porn.id
 | 
			
		||||
  name  = ""
 | 
			
		||||
  type  = "A"
 | 
			
		||||
  value = vultr_instance.our_loadbalancer[0].main_ip
 | 
			
		||||
  ttl   = 3600
 | 
			
		||||
}
 | 
			
		||||
# resource "bunnynet_dns_record" "future_porn_apex" {
 | 
			
		||||
#   zone  = bunnynet_dns_zone.future_porn.id
 | 
			
		||||
#   name  = ""
 | 
			
		||||
#   type  = "A"
 | 
			
		||||
#   value = vultr_instance.loadbalancer[0].main_ip
 | 
			
		||||
#   ttl   = 3600
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
resource "bunnynet_dns_record" "www_future_porn" {
 | 
			
		||||
@ -166,17 +182,17 @@ resource "bunnynet_dns_record" "www_future_porn" {
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# vultr instance for running our app
 | 
			
		||||
resource "vultr_instance" "our_server" {
 | 
			
		||||
  count           = 1
 | 
			
		||||
  hostname        = "fp-our-server-${count.index}"
 | 
			
		||||
resource "vultr_instance" "swarm_node" {
 | 
			
		||||
  count           = 2
 | 
			
		||||
  hostname        = "swarm-node-${count.index}"
 | 
			
		||||
  plan            = "vc2-2c-4gb"
 | 
			
		||||
  region          = "ord"
 | 
			
		||||
  backups         = "disabled"
 | 
			
		||||
  ddos_protection = "false"
 | 
			
		||||
  os_id           = 1743
 | 
			
		||||
  enable_ipv6     = true
 | 
			
		||||
  label           = "fp our server ${count.index}"
 | 
			
		||||
  tags            = ["futureporn", "our", "server"]
 | 
			
		||||
  label           = "swarm node ${count.index}"
 | 
			
		||||
  tags            = ["fp", "our", "server", "tofu"]
 | 
			
		||||
  ssh_key_ids     = [local.envs.VULTR_SSH_KEY_ID]
 | 
			
		||||
  vpc_ids = [
 | 
			
		||||
    vultr_vpc.futureporn_vpc.id
 | 
			
		||||
@ -184,24 +200,7 @@ resource "vultr_instance" "our_server" {
 | 
			
		||||
  user_data = base64encode(var.vps_user_data)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
# vultr instance for running our app's background task runners
 | 
			
		||||
resource "vultr_instance" "our_worker" {
 | 
			
		||||
  count           = 2
 | 
			
		||||
  hostname        = "fp-our-worker-${count.index}"
 | 
			
		||||
  plan            = "vc2-2c-4gb"
 | 
			
		||||
  region          = "ord"
 | 
			
		||||
  backups         = "disabled"
 | 
			
		||||
  ddos_protection = "false"
 | 
			
		||||
  os_id           = 1743
 | 
			
		||||
  enable_ipv6     = true
 | 
			
		||||
  label           = "fp our worker ${count.index}"
 | 
			
		||||
  tags            = ["futureporn", "our", "worker"]
 | 
			
		||||
  ssh_key_ids     = [local.envs.VULTR_SSH_KEY_ID]
 | 
			
		||||
  vpc_ids = [
 | 
			
		||||
    vultr_vpc.futureporn_vpc.id
 | 
			
		||||
  ]
 | 
			
		||||
  user_data = base64encode(var.vps_user_data)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# # vultr instance meant for capturing VODs
 | 
			
		||||
@ -245,62 +244,42 @@ resource "vultr_instance" "our_worker" {
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
resource "vultr_instance" "database" {
 | 
			
		||||
  count    = 1
 | 
			
		||||
  hostname = "fp-db-${count.index}"
 | 
			
		||||
  plan     = "vc2-1c-2gb"
 | 
			
		||||
  region   = "ord"
 | 
			
		||||
  backups  = "enabled"
 | 
			
		||||
  backups_schedule {
 | 
			
		||||
    hour = "2"
 | 
			
		||||
    type = "daily"
 | 
			
		||||
  }
 | 
			
		||||
# This is our ipfs node with a really big dick, I mean disk
 | 
			
		||||
resource "vultr_instance" "ipfs_vps" {
 | 
			
		||||
  count           = 1
 | 
			
		||||
  hostname        = "fp-ipfs-${count.index}"
 | 
			
		||||
  plan            = "vc2-2c-2gb"
 | 
			
		||||
  region          = "ord"
 | 
			
		||||
  backups         = "disabled"
 | 
			
		||||
  ddos_protection = "false"
 | 
			
		||||
  os_id           = 1743
 | 
			
		||||
  enable_ipv6     = true
 | 
			
		||||
  vpc_ids         = [vultr_vpc.futureporn_vpc.id]
 | 
			
		||||
  label           = "fp database ${count.index}"
 | 
			
		||||
  tags            = ["futureporn", "database"]
 | 
			
		||||
  label           = "fp ipfs ${count.index}"
 | 
			
		||||
  tags            = ["futureporn", "ipfs", "tofu"]
 | 
			
		||||
  ssh_key_ids     = [local.envs.VULTR_SSH_KEY_ID]
 | 
			
		||||
  user_data       = base64encode(var.vps_user_data)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  # backups  = "enabled"
 | 
			
		||||
  # backups_schedule {
 | 
			
		||||
  #   hour = "2"
 | 
			
		||||
  #   type = "daily"
 | 
			
		||||
  # }
 | 
			
		||||
 | 
			
		||||
# resource "vultr_instance" "tracker" {
 | 
			
		||||
#   count           = 0
 | 
			
		||||
#   hostname        = "fp-tracker-${count.index}"
 | 
			
		||||
#   plan            = "vc2-1c-2gb"
 | 
			
		||||
#   region          = "ord"
 | 
			
		||||
#   backups         = "disabled"
 | 
			
		||||
#   ddos_protection = "false"
 | 
			
		||||
#   os_id           = 1743
 | 
			
		||||
#   enable_ipv6     = true
 | 
			
		||||
#   vpc_ids         = [vultr_vpc.futureporn_vpc.id]
 | 
			
		||||
#   label           = "fp tracker ${count.index}"
 | 
			
		||||
#   tags            = ["futureporn", "tracker"]
 | 
			
		||||
#   ssh_key_ids     = [local.envs.VULTR_SSH_KEY_ID]
 | 
			
		||||
#   user_data       = base64encode(var.vps_user_data)
 | 
			
		||||
#   reserved_ip_id  = vultr_reserved_ip.futureporn_tracker_ip.id
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
resource "ansible_host" "ipfs_vps" {
 | 
			
		||||
  for_each = { for idx, host in var.ipfs_hosts : idx => host }
 | 
			
		||||
  name     = each.value
 | 
			
		||||
  for_each = { for idx, host in vultr_instance.ipfs_vps : idx => host }
 | 
			
		||||
  name     = each.value.main_ip   # <-- pick the IP or hostname here
 | 
			
		||||
  groups   = ["ipfs"]
 | 
			
		||||
 | 
			
		||||
  variables = {
 | 
			
		||||
    ansible_user = "root"
 | 
			
		||||
    ansible_host = each.value
 | 
			
		||||
    ansible_host = each.value.main_ip  # <-- pick the IP here too
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
resource "vultr_block_storage" "ipfs_blockstorage" {
 | 
			
		||||
  label = "fp-ipfs"
 | 
			
		||||
  size_gb = 5000
 | 
			
		||||
  region = "ord"
 | 
			
		||||
  attached_to_instance = vultr_instance.ipfs_vps[0].id
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# resource "ansible_host" "capture_vps" {
 | 
			
		||||
#   for_each = { for idx, host in vultr_instance.capture_vps : idx => host }
 | 
			
		||||
#   name     = each.value.hostname
 | 
			
		||||
@ -317,48 +296,34 @@ resource "ansible_host" "ipfs_vps" {
 | 
			
		||||
#   }
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
resource "ansible_host" "load_balancer" {
 | 
			
		||||
  for_each = { for idx, host in vultr_instance.load_balancer : idx => host }
 | 
			
		||||
  name     = each.value.hostname
 | 
			
		||||
  groups   = ["load_balancer"]
 | 
			
		||||
  variables = {
 | 
			
		||||
    ansible_host = each.value.main_ip
 | 
			
		||||
    internal_ip  = each.value.internal_ip
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
# resource "ansible_host" "loadbalancer" {
 | 
			
		||||
#   count = length(vultr_instance.loadbalancer)
 | 
			
		||||
 | 
			
		||||
resource "ansible_host" "database" {
 | 
			
		||||
  for_each = { for idx, host in vultr_instance.database : idx => host }
 | 
			
		||||
  name     = each.value.hostname
 | 
			
		||||
  groups   = ["database"]
 | 
			
		||||
  variables = {
 | 
			
		||||
    ansible_host = each.value.main_ip
 | 
			
		||||
    internal_ip  = each.value.internal_ip
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
#   name   = vultr_instance.loadbalancer[count.index].hostname
 | 
			
		||||
#   groups = ["loadbalancer"]
 | 
			
		||||
#   variables = {
 | 
			
		||||
#     ansible_host = vultr_instance.loadbalancer[count.index].main_ip
 | 
			
		||||
#     internal_ip  = vultr_instance.loadbalancer[count.index].internal_ip
 | 
			
		||||
#   }
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
resource "ansible_host" "our_server" {
 | 
			
		||||
  for_each = { for idx, host in vultr_instance.our_server : idx => host }
 | 
			
		||||
 | 
			
		||||
resource "ansible_host" "swarm_node" {
 | 
			
		||||
  for_each = { for idx, host in vultr_instance.swarm_node : idx => host }
 | 
			
		||||
  name     = each.value.hostname
 | 
			
		||||
  groups   = ["our-server"]
 | 
			
		||||
  groups   = ["swarm-node"]
 | 
			
		||||
  variables = {
 | 
			
		||||
    ansible_host      = each.value.main_ip
 | 
			
		||||
    internal_ip       = each.value.internal_ip
 | 
			
		||||
    vultr_instance_id = each.value.id
 | 
			
		||||
    vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id
 | 
			
		||||
    # Set swarm manager only on the 0th host
 | 
			
		||||
    swarm_enable_manager  = each.key == "0" # for-expression map keys are strings, so compare against "0"
 | 
			
		||||
    swarm_enable_worker   = each.key != "0"
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
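These `variables` are what the Ansible provider writes into the generated inventory, and they are how the swarm role's `swarm_enable_manager` / `swarm_enable_worker` defaults get overridden per host. For orientation, the host vars for the first node would come out roughly like the sketch below (all values are placeholder examples, not real ones):

# hypothetical host_vars for swarm-node-0 as emitted into the generated inventory
ansible_host: 203.0.113.10        # example public IP
internal_ip: 10.2.128.10          # example VPC IP
vultr_instance_id: example-instance-id
vultr_vfs_storage_id: example-vfs-id
swarm_enable_manager: true
swarm_enable_worker: false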
 | 
			
		||||
resource "ansible_host" "our_worker" {
 | 
			
		||||
  for_each = { for idx, host in vultr_instance.our_worker : idx => host }
 | 
			
		||||
  name     = each.value.hostname
 | 
			
		||||
  groups   = ["our-worker"]
 | 
			
		||||
  variables = {
 | 
			
		||||
    ansible_host      = each.value.main_ip
 | 
			
		||||
    internal_ip       = each.value.internal_ip
 | 
			
		||||
    vultr_instance_id = each.value.id
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# resource "ansible_host" "tracker" {
 | 
			
		||||
@ -372,24 +337,26 @@ resource "ansible_host" "our_worker" {
 | 
			
		||||
#   }
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
resource "ansible_host" "our" {
 | 
			
		||||
  for_each = { for idx, host in vultr_instance.our_vps : idx => host }
 | 
			
		||||
  name     = each.value.hostname
 | 
			
		||||
  groups   = ["our"]
 | 
			
		||||
# resource "ansible_host" "our" {
 | 
			
		||||
#   for_each = { for idx, host in vultr_instance.our_vps : idx => host }
 | 
			
		||||
#   name     = each.value.hostname
 | 
			
		||||
#   groups   = ["our"]
 | 
			
		||||
 | 
			
		||||
  variables = {
 | 
			
		||||
    ansible_host         = each.value.main_ip
 | 
			
		||||
    internal_ip          = each.value.internal_ip
 | 
			
		||||
    vultr_instance_id    = each.value.id
 | 
			
		||||
    vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
#   variables = {
 | 
			
		||||
#     ansible_host         = each.value.main_ip
 | 
			
		||||
#     internal_ip          = each.value.internal_ip
 | 
			
		||||
#     vultr_instance_id    = each.value.id
 | 
			
		||||
#     vultr_vfs_storage_id = vultr_virtual_file_system_storage.vfs.id
 | 
			
		||||
#   }
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
resource "vultr_virtual_file_system_storage" "vfs" {
 | 
			
		||||
  label   = "fp-vfs-cache"
 | 
			
		||||
  size_gb = 200
 | 
			
		||||
  region  = "ord"
 | 
			
		||||
  tags    = ["our", "vfs"]
 | 
			
		||||
 | 
			
		||||
  attached_instances = vultr_instance.swarm_node[*].id
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -399,12 +366,8 @@ resource "vultr_virtual_file_system_storage" "vfs" {
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
resource "ansible_group" "our-server" {
 | 
			
		||||
  name = "our-server"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
resource "ansible_group" "our-worker" {
 | 
			
		||||
  name = "our-worker"
 | 
			
		||||
resource "ansible_group" "swarm-node" {
 | 
			
		||||
  name = "swarm-node"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -413,23 +376,22 @@ resource "ansible_group" "our" {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
resource "ansible_group" "load_balancer" {
 | 
			
		||||
  name = "load_balancer"
 | 
			
		||||
}
 | 
			
		||||
# resource "ansible_group" "loadbalancer" {
 | 
			
		||||
#   name = "loadbalancer"
 | 
			
		||||
# }
 | 
			
		||||
 | 
			
		||||
resource "ansible_group" "database" {
 | 
			
		||||
  name = "database"
 | 
			
		||||
resource "ansible_group" "ipfs" {
 | 
			
		||||
  name = "ipfs"
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
resource "ansible_group" "futureporn" {
 | 
			
		||||
  name = "futureporn"
 | 
			
		||||
  children = [
 | 
			
		||||
    "load_balancer",
 | 
			
		||||
    "database",
 | 
			
		||||
    # "loadbalancer",
 | 
			
		||||
    "capture",
 | 
			
		||||
    "our-server",
 | 
			
		||||
    "our-worker",
 | 
			
		||||
    "our"
 | 
			
		||||
    "swarm-node",
 | 
			
		||||
    "our",
 | 
			
		||||
    "ipfs"
 | 
			
		||||
  ]
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||