capture progress

This commit is contained in:
CJ_Clippy 2024-07-25 05:53:52 -08:00
parent af8f58940a
commit 4bc11c027e
32 changed files with 2147 additions and 386 deletions

18 .vscode/launch.json vendored Normal file
View File

@@ -0,0 +1,18 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "tsx",
      "type": "node",
      "request": "launch",
      "program": "${file}",
      "runtimeExecutable": "tsx",
      "console": "integratedTerminal",
      "internalConsoleOptions": "neverOpen",
      "skipFiles": [
        "<node_internals>/**",
        "${workspaceFolder}/node_modules/**"
      ]
    }
  ]
}

View File

@@ -47,22 +47,7 @@ clean:
dotenvx run -f .env.${ENV} -- node ./packages/infra/vultr-delete-orphaned-resources.js
deps:
echo "Some of the install methods for these dependencies are not cross-platform compatible. Some of the install methods are not tested. Expect this to fail. Please consult the Makefile for URLs to project sources."
sudo pamac install make entr nvm kubectl docker helm expect
curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
echo "go to https://github.com/txn2/kubefwd/releases/latest to get kubefwd"
echo "go to https://github.com/tilt-dev/ctlptl/releases/latest to get ctlptl"
sudo systemctl enable docker
sudo systemctl start docker
usermod -aG docker cj
newgrp docker
npm i -g pnpm
pnpm install -g @dotenvx/dotenvx
curl -OL 'https://github.com/vmware-tanzu/velero/releases/download/v1.13.2/velero-v1.13.2-linux-amd64.tar.gz'
OS=$(go env GOOS); ARCH=$(go env GOARCH); curl -fsSL -o cmctl https://github.com/cert-manager/cmctl/releases/latest/download/cmctl_${OS}_${ARCH}
chmod +x cmctl
sudo mv cmctl /usr/local/bin
echo "use `devbox install`"
# A gitea act runner which runs locally
# https://docs.gitea.com/next/usage/actions/overview

View File

@@ -1,9 +1,33 @@
# futureporn.net
[![Built with Devbox](https://www.jetify.com/img/devbox/shield_galaxy.svg)](https://www.jetify.com/devbox/docs/contributor-quickstart/)
Source code for https://futureporn.net
See ./ARCHITECTURE.md for an overview.
## Getting Started
I'm working towards a better development experience with devbox and Tilt. This process is in flux and is likely to be broken.
The main gist is as follows:
1. Install [devbox](https://www.jetify.com/devbox/docs/installing_devbox/)
2. Install development environment & packages using devbox.
devbox install
3. Start a local KIND Kubernetes cluster
make cluster
4. Start Tilt
make tilt
Tilt will manage the KIND cluster, downloading the necessary Docker images and building the containers listed in the fp Helm chart at ./Charts/fp. Changes to these charts or to the application code will update or rebuild the images as necessary.
## Metrics Notes
Keeping track of metrics we want to scrape using Prometheus

View File

@@ -181,14 +181,20 @@ cmd_button('postgres:drop',
argv=['sh', './scripts/postgres-drop.sh'],
resource='postgresql-primary',
icon_name='delete',
text='DROP futureporn_db'
text='DROP all databases'
)
cmd_button('postgres:backup',
argv=['sh', './scripts/postgres-backup.sh'],
cmd_button('capture-api:create',
argv=['http', '--ignore-stdin', 'POST', 'http://localhost:5003/api/record', "url='https://twitch.tv/ironmouse'", "channel='ironmouse'"],
resource='capture-api',
icon_name='send',
text='Start Recording'
)
cmd_button('postgres:graphile',
argv=['sh', './scripts/postgres-test-graphile.sh'],
resource='postgresql-primary',
icon_name='download',
text='backup the database'
icon_name='graph',
text='create graphile test job',
)
cmd_button('postgres:graphile',
argv=['sh', './scripts/postgres-test-graphile.sh'],

View File

@@ -45,6 +45,22 @@ spec:
key: databaseUrl
- name: PORT
value: "{{ .Values.capture.api.port }}"
- name: S3_ENDPOINT
value: "{{ .Values.s3.endpoint }}"
- name: S3_REGION
value: "{{ .Values.s3.region }}"
- name: S3_BUCKET_NAME
value: "{{ .Values.s3.buckets.usc }}"
- name: S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: capture
key: s3AccessKeyId
- name: S3_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: capture
key: s3SecretAccessKey
resources:
limits:
cpu: 1000m
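These variables land in the capture container's environment; the record task later in this diff reads them and builds the S3 client via Record.makeS3Client. A minimal sketch of that consumption (s3ClientFromEnv is a hypothetical helper; the variable names match the template above):

import { S3Client } from '@aws-sdk/client-s3'

// build an S3 client from the env vars injected by the Helm template above;
// failing fast keeps a misconfigured pod from starting half-working
function s3ClientFromEnv(): S3Client {
  const { S3_ENDPOINT, S3_REGION, S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY } = process.env
  if (!S3_ENDPOINT || !S3_REGION || !S3_ACCESS_KEY_ID || !S3_SECRET_ACCESS_KEY) {
    throw new Error('S3_ENDPOINT, S3_REGION, S3_ACCESS_KEY_ID, and S3_SECRET_ACCESS_KEY must be set')
  }
  return new S3Client({
    endpoint: S3_ENDPOINT,
    region: S3_REGION,
    credentials: { accessKeyId: S3_ACCESS_KEY_ID, secretAccessKey: S3_SECRET_ACCESS_KEY }
  })
}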

View File

@@ -1,6 +1,12 @@
environment: development
# storageClassName: csi-hostpath-sc # used by minikube
storageClassName: standard # used by Kind
s3:
endpoint: https://s3.us-west-000.backblazeb2.com
region: us-west-000
buckets:
main: fp-dev
usc: fp-usc-dev
link2cid:
imageName: fp/link2cid
next:

View File

@@ -44,7 +44,9 @@ RUN ls -la /prod/capture
## start the app with dumb init to spawn the Node.js runtime process
## with signal support
## The mode @futureporn/capture uses when starting is determined by the FUNCTION environment variable (worker|api).
FROM base AS capture
RUN ls -la /usr/local/bin/yt-dlp
ENV HOSTNAME=0.0.0.0 NODE_ENV=production
COPY --from=build /prod/capture .
CMD [ "dumb-init", "node", "dist/index.js" ]

24 devbox.json Normal file
View File

@@ -0,0 +1,24 @@
{
"$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.12.0/.schema/devbox.schema.json",
"packages": [
"nodejs@20",
"tilt@latest",
"ctlptl@latest",
"kubectl@latest",
"cmctl@latest"
],
"env": {
"DEVBOX_COREPACK_ENABLED": "true",
"ENV": "development"
},
"shell": {
"init_hook": [
"pnpm install"
],
"scripts": {
"test": [
"echo \"Error: no test specified\" && exit 1"
]
}
}
}

298 devbox.lock Normal file
View File

@@ -0,0 +1,298 @@
{
"lockfile_version": "1",
"packages": {
"cmctl@latest": {
"last_modified": "2024-07-07T07:43:47Z",
"resolved": "github:NixOS/nixpkgs/b60793b86201040d9dee019a05089a9150d08b5b#cmctl",
"source": "devbox-search",
"version": "1.14.7",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/69lc5y36840ccy6d1pzph994psadk4bm-cmctl-1.14.7",
"default": true
}
],
"store_path": "/nix/store/69lc5y36840ccy6d1pzph994psadk4bm-cmctl-1.14.7"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/d5l61iil1gaax5sipnzg80mb0p1hqk9f-cmctl-1.14.7",
"default": true
}
],
"store_path": "/nix/store/d5l61iil1gaax5sipnzg80mb0p1hqk9f-cmctl-1.14.7"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/j0r3wavi836mp31l0s7r1c3rjryw2z62-cmctl-1.14.7",
"default": true
}
],
"store_path": "/nix/store/j0r3wavi836mp31l0s7r1c3rjryw2z62-cmctl-1.14.7"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/lnmy96wjzmjna7z9f0dbqd16nf2x5qbv-cmctl-1.14.7",
"default": true
}
],
"store_path": "/nix/store/lnmy96wjzmjna7z9f0dbqd16nf2x5qbv-cmctl-1.14.7"
}
}
},
"ctlptl@latest": {
"last_modified": "2024-07-07T07:43:47Z",
"resolved": "github:NixOS/nixpkgs/b60793b86201040d9dee019a05089a9150d08b5b#ctlptl",
"source": "devbox-search",
"version": "0.8.29",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/gvnmvb315zngbg5a0idynlwxcc45gmyd-ctlptl-0.8.29",
"default": true
}
],
"store_path": "/nix/store/gvnmvb315zngbg5a0idynlwxcc45gmyd-ctlptl-0.8.29"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/vgwbxwaf773mmgcbc1j5h6n5a7g587lf-ctlptl-0.8.29",
"default": true
}
],
"store_path": "/nix/store/vgwbxwaf773mmgcbc1j5h6n5a7g587lf-ctlptl-0.8.29"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/ga4rp2188c6k24162s2n23nfi4846790-ctlptl-0.8.29",
"default": true
}
],
"store_path": "/nix/store/ga4rp2188c6k24162s2n23nfi4846790-ctlptl-0.8.29"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/wbfjbk575ny949rfyqpm4vai4ap5rbpp-ctlptl-0.8.29",
"default": true
}
],
"store_path": "/nix/store/wbfjbk575ny949rfyqpm4vai4ap5rbpp-ctlptl-0.8.29"
}
}
},
"kubectl@latest": {
"last_modified": "2024-07-07T07:43:47Z",
"resolved": "github:NixOS/nixpkgs/b60793b86201040d9dee019a05089a9150d08b5b#kubectl",
"source": "devbox-search",
"version": "1.30.2",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/i1zidf41bkfzs2l1pq9fi1frymsfgywc-kubectl-1.30.2",
"default": true
},
{
"name": "man",
"path": "/nix/store/dzxnn9mk9plcx3w9862jyd0nxys2yywz-kubectl-1.30.2-man",
"default": true
},
{
"name": "convert",
"path": "/nix/store/v9ij5fnxxa02jkzpjvkbxw2jc4p9cbld-kubectl-1.30.2-convert"
}
],
"store_path": "/nix/store/i1zidf41bkfzs2l1pq9fi1frymsfgywc-kubectl-1.30.2"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/k7ql4247qs6ny27m3iz5c9xf5gb248a2-kubectl-1.30.2",
"default": true
},
{
"name": "man",
"path": "/nix/store/wy64r4nn3isydw4nx257h95qy2x2z4mx-kubectl-1.30.2-man",
"default": true
},
{
"name": "convert",
"path": "/nix/store/ic8za302hvb4kf4zrs55ivr4q2n2lznn-kubectl-1.30.2-convert"
}
],
"store_path": "/nix/store/k7ql4247qs6ny27m3iz5c9xf5gb248a2-kubectl-1.30.2"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/v029n959l5b289br0cq591b04yc48516-kubectl-1.30.2",
"default": true
},
{
"name": "man",
"path": "/nix/store/0dvcxn7gsi2ycy9blb7pcy506w4xp2vi-kubectl-1.30.2-man",
"default": true
},
{
"name": "convert",
"path": "/nix/store/2nfq4ivwa4a7jwc0183f2wpl1jxbn754-kubectl-1.30.2-convert"
}
],
"store_path": "/nix/store/v029n959l5b289br0cq591b04yc48516-kubectl-1.30.2"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/3vkf0406s1i6l89hk5wrakh4bbn0p1p2-kubectl-1.30.2",
"default": true
},
{
"name": "man",
"path": "/nix/store/3wbvgkkka1knkxvyr4c8qbpr448smw8i-kubectl-1.30.2-man",
"default": true
},
{
"name": "convert",
"path": "/nix/store/h5zxz8db6wligwhw5gnwk4gbc8j1ixik-kubectl-1.30.2-convert"
}
],
"store_path": "/nix/store/3vkf0406s1i6l89hk5wrakh4bbn0p1p2-kubectl-1.30.2"
}
}
},
"nodejs@20": {
"last_modified": "2024-07-07T07:43:47Z",
"plugin_version": "0.0.2",
"resolved": "github:NixOS/nixpkgs/b60793b86201040d9dee019a05089a9150d08b5b#nodejs_20",
"source": "devbox-search",
"version": "20.14.0",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/sqnbldm1fjw88v23yq4v6531y4m7v2fh-nodejs-20.14.0",
"default": true
},
{
"name": "libv8",
"path": "/nix/store/1i0rb2axkrxvsq5pz8s2q07ard2p36a1-nodejs-20.14.0-libv8"
}
],
"store_path": "/nix/store/sqnbldm1fjw88v23yq4v6531y4m7v2fh-nodejs-20.14.0"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/r1nwmlbsn67f5rhayr7jjjdmiflxpk92-nodejs-20.14.0",
"default": true
},
{
"name": "libv8",
"path": "/nix/store/5ii3xkbd3iv0xvqqvjg3agsm0dinidgm-nodejs-20.14.0-libv8"
}
],
"store_path": "/nix/store/r1nwmlbsn67f5rhayr7jjjdmiflxpk92-nodejs-20.14.0"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/wzgnws4r1c98vzj5q6gq4drz2jfq7d5q-nodejs-20.14.0",
"default": true
},
{
"name": "libv8",
"path": "/nix/store/gc2gnkc8hvkh51ab3a29fvgzy2qsqb2s-nodejs-20.14.0-libv8"
}
],
"store_path": "/nix/store/wzgnws4r1c98vzj5q6gq4drz2jfq7d5q-nodejs-20.14.0"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/ilkfhnqz4xczrliqjva8770x2svbfznd-nodejs-20.14.0",
"default": true
},
{
"name": "libv8",
"path": "/nix/store/2qaf68dzimr8as4bgli0xmsn11c0ah2j-nodejs-20.14.0-libv8"
}
],
"store_path": "/nix/store/ilkfhnqz4xczrliqjva8770x2svbfznd-nodejs-20.14.0"
}
}
},
"tilt@latest": {
"last_modified": "2024-07-15T21:47:20Z",
"resolved": "github:NixOS/nixpkgs/b2c1f10bfbb3f617ea8e8669ac13f3f56ceb2ea2#tilt",
"source": "devbox-search",
"version": "0.33.17",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/l19qinywsx7y86xp8vgwr3bgnbi0rfcj-tilt-0.33.17",
"default": true
}
],
"store_path": "/nix/store/l19qinywsx7y86xp8vgwr3bgnbi0rfcj-tilt-0.33.17"
},
"aarch64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/f6swxifmvnxjxifxyw4k4aiyxh0dgfyq-tilt-0.33.17",
"default": true
}
],
"store_path": "/nix/store/f6swxifmvnxjxifxyw4k4aiyxh0dgfyq-tilt-0.33.17"
},
"x86_64-darwin": {
"outputs": [
{
"name": "out",
"path": "/nix/store/d59rlsabcqxax6bgw6d30zhmflw65ch0-tilt-0.33.17",
"default": true
}
],
"store_path": "/nix/store/d59rlsabcqxax6bgw6d30zhmflw65ch0-tilt-0.33.17"
},
"x86_64-linux": {
"outputs": [
{
"name": "out",
"path": "/nix/store/qfv96sjcsslynqbilwj823x8nxvgj5cv-tilt-0.33.17",
"default": true
}
],
"store_path": "/nix/store/qfv96sjcsslynqbilwj823x8nxvgj5cv-tilt-0.33.17"
}
}
}
}
}

View File

@@ -15,6 +15,10 @@ Worker container runs a Graphile Worker which listens for specific tasks related
## Misc dev notes
### idea for taking snapshots of stream in progress
https://ofarukcaki.medium.com/producing-real-time-video-with-node-js-and-ffmpeg-a59ac27461a1
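A rough sketch of that snapshot idea using fluent-ffmpeg (a dependency added elsewhere in this commit); the playlist URL and output path are placeholders:

import ffmpeg from 'fluent-ffmpeg'

// copy a single frame out of an in-progress HLS stream and save it as a jpg
const playlistUrl = 'https://example.com/live/playlist.m3u8' // placeholder
ffmpeg()
  .input(playlistUrl)
  .outputOptions(['-frames:v 1', '-q:v 2']) // one frame, high jpeg quality
  .output('/tmp/snapshot.jpg')
  .on('end', () => console.log('snapshot saved'))
  .on('error', (err) => console.error(err))
  .run()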
### yt-dlp end-of-stream output
The end-of-stream output from yt-dlp when recording CB looks like this:

View File

@@ -21,6 +21,7 @@
"@futureporn/utils": "workspace:^",
"@paralleldrive/cuid2": "^2.2.2",
"@types/chai": "^4.3.16",
"@types/fluent-ffmpeg": "^2.1.24",
"@types/mocha": "^10.0.7",
"@types/node": "^20.14.11",
"diskusage": "^1.2.0",
@@ -46,8 +47,12 @@
"youtube-dl-wrap": "github:insanity54/youtube-dl-wrap"
},
"devDependencies": {
"@smithy/util-stream": "^3.1.2",
"@types/sinon": "^17.0.3",
"@types/sinon-chai": "^3.2.12",
"aws-sdk": "^2.1663.0",
"aws-sdk-client-mock": "^4.0.1",
"aws-sdk-mock": "^6.0.4",
"chai": "^4.4.1",
"cheerio": "1.0.0-rc.12",
"mocha": "^10.7.0",
@@ -55,6 +60,8 @@
"node-abort-controller": "^3.1.1",
"node-fetch": "^3.3.2",
"nodemon": "^2.0.22",
"pretty-bytes": "^6.1.1",
"s3": "link:aws-sdk/clients/s3",
"sinon": "^15.2.0",
"sinon-chai": "^3.7.0",
"sinon-test": "^3.1.6",

File diff suppressed because it is too large

View File

@@ -1,22 +1,67 @@
import Record from "./Record.js"
import { expect } from "chai"
import { ChildProcess, spawn } from "child_process"
import { createReadStream, readFileSync, ReadStream } from "fs"
import AWSMock from 'aws-sdk-mock'
import sinon from 'sinon';
import sinonChai from 'sinon-chai';
import { PutObjectCommand, PutObjectCommandInput, S3Client, CreateMultipartUploadCommand, UploadPartCommand, UploadPartCommandInput } from "@aws-sdk/client-s3"
import { join, dirname } from "path"
import { fileURLToPath } from "url"
import { S3 } from 'aws-sdk';
import { HeadObjectOutput } from 'aws-sdk/clients/s3';
import { Readable } from 'stream';
import { mockClient } from 'aws-sdk-client-mock';
import { sdkStreamMixin } from '@smithy/util-stream'
// "pay no attention to that man behind the curtain"
// export function getObjectReadStream(s3Client: S3, Bucket: string, Key: string): Readable {
// return s3Client.getObject({ Bucket, Key }).createReadStream();
// }
// export async function waitForObjectExists(
// s3Client: S3Client,
// Bucket: string,
// Key: string
// ): Promise<HeadObjectOutput> {
// return s3Client.waitFor('objectExists', { Bucket, Key }).promise();
// }
const __dirname = dirname(fileURLToPath(import.meta.url));
const testStreamDir = '/tmp/record-test-stream.m3u8'
describe('Record', function () {
let record: Record
this.beforeEach(function () {
record = new Record({ url: 'https://example.com/my-cool-stream' })
// mocking @aws-sdk/lib-storage Upload() has some nuances.
// @see https://github.com/m-radzikowski/aws-sdk-client-mock?tab=readme-ov-file#lib-storage-upload
it('should accept a {ReadStream} as input', async function () {
const inputStream = createReadStream(join(__dirname, './fixtures/mock-stream0.mp4')) // 192627 bytes
const s3ClientMock = mockClient(S3Client)
s3ClientMock.on(CreateMultipartUploadCommand).resolves({UploadId: '1'});
s3ClientMock.on(UploadPartCommand).resolves({ETag: '1'});
const s3Client = new S3Client({ region: 'us-west-000' })
const record = new Record({ inputStream, s3Client, channel: 'coolguy_69', bucket: 'test' })
await record.start()
expect(record).to.have.property('counter', 192627)
expect(record).to.have.property('bucket', 'test')
})
describe('start()', function () {
it('should start the recording', async function () {
await record.start()
expect(record).to.have.property('id')
expect(record).to.have.property('url')
})
})
describe('stop()', function () {
it('should stop the recording', async function () {
await record.stop()
expect(record).to.have.property('cdnUrl')
})
xit('should restart if a EPIPE is encountered', async function () {
// @todo IDK how to implement this.
const inputStream = createReadStream(join(__dirname, './fixtures/mock-stream0.mp4'))
const s3ClientMock = mockClient(S3Client)
s3ClientMock.on(CreateMultipartUploadCommand).resolves({UploadId: '1'})
s3ClientMock.on(UploadPartCommand).rejectsOnce({cause: 'simulated network interruption'}).resolves({ ETag: '1' }) // this rejection is probably not specific enough to simulate EPIPE
const s3Client = new S3Client({ region: 'us-west-000' })
const record = new Record({ inputStream, s3Client, channel: 'coolguy_69', bucket: 'test' })
await record.start()
expect(record).to.have.property('counter', 192627)
})
})
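The skipped EPIPE test above leaves the restart behavior open. One possible shape, purely illustrative (retryOnEPIPE is a hypothetical helper, not part of Record):

// restart a recording when the pipeline dies with EPIPE, up to maxAttempts times
async function retryOnEPIPE(run: () => Promise<unknown>, maxAttempts = 3): Promise<void> {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      await run()
      return
    } catch (err) {
      const code = (err as NodeJS.ErrnoException)?.code
      if (code !== 'EPIPE' || attempt === maxAttempts) throw err
      console.warn(`EPIPE on attempt ${attempt}; restarting record`)
    }
  }
}

// usage: await retryOnEPIPE(() => record.start())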

View File

@@ -1,102 +1,184 @@
import { createId } from '@paralleldrive/cuid2'
import { spawn } from 'child_process';
import { ua0 } from '@futureporn/scout/ua.js'
import { getTmpFile } from '@futureporn/utils'
import { PassThrough, pipeline, Readable } from 'stream';
import prettyBytes from 'pretty-bytes';
import { Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";
import 'dotenv/config'
export interface RecordArgs {
url: string;
filename?: string;
channel?: string;
channel: string;
s3Client: S3Client;
bucket: string;
date?: string;
inputStream: Readable;
}
interface MakeS3ClientOptions {
accessKeyId: string;
secretAccessKey: string;
region: string;
endpoint: string
}
interface getFFmpegDownloadOptions {
url: string;
}
export default class Record {
readonly id: string;
readonly url: string;
private s3Client: S3Client;
private uploadStream: PassThrough;
private ticker?: NodeJS.Timeout;
inputStream: Readable;
counter: number;
bucket: string;
keyName: string;
datestamp: string;
filename?: string;
channel?: string;
channel: string;
date?: string;
constructor({ url }: RecordArgs) {
if (!url) throw new Error('url passed to Record constructor was undefined.');
constructor({ inputStream, channel, s3Client, bucket }: RecordArgs) {
if (!inputStream) throw new Error('Record constructor was missing inputStream.');
if (!bucket) throw new Error('Record constructor was missing bucket.');
if (!channel) throw new Error('Record constructor was missing channel!');
if (!s3Client) throw new Error('Record constructor was missing s3Client');
this.inputStream = inputStream
this.id = createId()
this.url = url
this.s3Client = s3Client
this.bucket = bucket
this.channel = channel
this.counter = 0
this.datestamp = new Date().toISOString()
this.keyName = `${this.datestamp}-${channel}-${createId()}.ts`
this.uploadStream = new PassThrough()
}
async start() {
console.log(`@TODO record start with id=${this.id}, url=${this.url}`)
makeProgressTicker() {
this.ticker = setInterval(() => {
console.log(`[progress] ${this.counter} bytes (aggregate) (${prettyBytes(this.counter)}) have passed through the pipeline.`)
}, 1000 * 30)
}
const playlistUrlPromise = new Promise<string>((resolve) => {
const ytdlp = spawn('yt-dlp', [
'-g',
this.url
])
ytdlp.stdout.on('data', (data) => {
resolve(data)
})
static makeS3Client({
accessKeyId,
secretAccessKey,
region,
endpoint
}: MakeS3ClientOptions): S3Client {
const client = new S3Client({
endpoint,
region,
credentials: {
accessKeyId,
secretAccessKey
}
})
const playlistUrl = await playlistUrlPromise
const filename = getTmpFile(`stream.ts`)
return client
}
const ffmpeg = spawn('ffmpeg', [
static getFFmpegDownload({ url }: getFFmpegDownloadOptions): Readable {
const ffmpegProc = spawn('ffmpeg', [
'-headers', `"User-Agent: ${ua0}"`,
'-i', playlistUrl,
'-i', url,
'-c:v', 'copy',
'-c:a', 'copy',
'-movflags', 'faststart',
'-y',
'-f', 'mpegts',
filename
])
const ffmpegProcess = spawn('ffmpeg', [
'-headers', `"User-Agent: ${ua0}"`,
'-i', playlistUrl,
'-c:v', 'copy',
'-c:a', 'copy',
'-movflags', 'faststart',
'-y',
'-f', 'mpegts',
filename
'-loglevel', 'quiet',
'pipe:1'
], {
stdio: 'inherit'
});
// ignoring stderr is important; otherwise ffmpeg fills that buffer and Node hangs
stdio: ['pipe', 'pipe', 'ignore']
})
return ffmpegProc.stdout
const ps = spawn('ps', ['ax']);
const grep = spawn('grep', ['ssh']);
ps.stdout.on('data', (data) => {
grep.stdin.write(data);
});
ps.stderr.on('data', (data) => {
console.error(`ps stderr: ${data}`);
});
ps.on('close', (code) => {
if (code !== 0) {
console.log(`ps process exited with code ${code}`);
}
async uploadToS3() {
const target = {
Bucket: this.bucket,
Key: this.keyName,
// We do this to keep TS happy. Body expects a Readable, not a ReadableStream nor a NodeJS.ReadableStream
// Body: new Readable().wrap(this.uploadStream)
Body: this.uploadStream
}
// greets https://stackoverflow.com/a/70159394/1004931
try {
const parallelUploads3 = new Upload({
client: this.s3Client,
partSize: 1024 * 1024 * 5,
queueSize: 1,
leavePartsOnError: false,
params: target,
});
parallelUploads3.on("httpUploadProgress", (progress) => {
console.log(progress)
if (progress?.loaded) {
console.log(`loaded ${progress.loaded} bytes (${prettyBytes(progress.loaded)})`);
} else {
console.log(`httpUploadProgress ${JSON.stringify(progress, null, 2)}`)
}
});
await parallelUploads3.done();
} catch (e) {
if (e instanceof Error) {
console.error(`while uploading a file to s3, we encountered an error`)
throw new Error(e.message);
} else {
throw new Error(`error of some sort ${JSON.stringify(e, null, 2)}`)
}
grep.stdin.end();
});
}
}
grep.stdout.on('data', (data) => {
console.log(data.toString());
});
grep.stderr.on('data', (data) => {
console.error(`grep stderr: ${data}`);
});
grep.on('close', (code) => {
if (code !== 0) {
console.log(`grep process exited with code ${code}`);
async start() {
this.makeProgressTicker()
// streams setup
this.uploadStream.on('data', (data) => {
this.counter += data.length
})
// stream pipeline setup
pipeline(
this.inputStream,
this.uploadStream,
(err) => {
if (err) {
console.error(`pipeline errored.`)
console.error(err)
} else {
console.log('pipeline succeeded.')
}
}
});
)
await this.uploadToS3()
clearInterval(this.ticker)
return {
id: this.id,
url: this.url
keyName: this.keyName,
channel: this.channel
}
}

View File

@@ -6,7 +6,8 @@ import graphileWorkerPlugin, { type ExtendedFastifyInstance } from './fastify-gr
const version = getPackageVersion('../package.json')
interface RecordBodyType {
url: string
url: string;
channel: string;
}
function build(opts: Record<string, any>={}, connectionString: string) {
@@ -17,9 +18,10 @@ function build(opts: Record<string, any>={}, connectionString: string) {
return { app: '@futureporn/capture', version }
})
app.post('/api/record', async function (request: FastifyRequest<{ Body: RecordBodyType }>, reply) {
const { url } = request.body
console.log(`POST /api/record with url=${url}`)
const job = await app.graphile.addJob('record', { url })
const { url, channel } = request.body
console.log(`POST /api/record with url=${url}, channel=${channel}`)
const job = await app.graphile.addJob('record', { url, channel })
return job
})
return app
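For reference, the Tiltfile's 'Start Recording' button earlier in this diff is equivalent to this plain fetch call (assuming the api is reachable on localhost:5003, as in that button):

// POST a record job; the body matches RecordBodyType { url, channel }
const res = await fetch('http://localhost:5003/api/record', {
  method: 'POST',
  headers: { 'content-type': 'application/json' },
  body: JSON.stringify({ url: 'https://twitch.tv/ironmouse', channel: 'ironmouse' })
})
console.log(await res.json()) // the queued graphile-worker job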

View File

@@ -0,0 +1,2 @@
application/vnd.apple.mpegurl mp4
text/x-abc abc

View File

@@ -0,0 +1 @@
asdfaslfk;sdf

View File

@@ -1,77 +0,0 @@
'use strict'
import { build } from './app.js'
import 'dotenv/config'
import { run } from 'graphile-worker'
import { dirname } from 'node:path';
import { fileURLToPath } from 'url';
const __dirname = dirname(fileURLToPath(import.meta.url));
if (!process.env.DATABASE_URL) throw new Error('DATABASE_URL is missing in env');
if (!process.env.FUNCTION) throw new Error(`FUNCTION env var was missing. FUNCTION env var must be either 'api' or 'worker'.`);
const connectionString = process.env.DATABASE_URL!
async function api() {
if (!process.env.PORT) throw new Error('PORT is missing in env');
const PORT = parseInt(process.env.PORT!)
const fastifyOpts = {
logger: {
level: 'info',
transport: {
target: 'pino-pretty'
}
}
}
const server = build(fastifyOpts, connectionString)
server.listen({ port: PORT }, (err) => {
if (err) {
server.log.error(err)
process.exit(1)
}
})
}
async function worker() {
const concurrency = (process.env?.WORKER_CONCURRENCY) ? parseInt(process.env.WORKER_CONCURRENCY) : 1
// Run a worker to execute jobs:
const runner = await run({
connectionString,
concurrency,
// Install signal handlers for graceful shutdown on SIGINT, SIGTERM, etc
noHandleSignals: false,
pollInterval: 1000,
taskDirectory: `${__dirname}/tasks`,
});
// Immediately await (or otherwise handle) the resulting promise, to avoid
// "unhandled rejection" errors causing a process crash in the event of
// something going wrong.
await runner.promise;
// If the worker exits (whether through fatal error or otherwise), the above
// promise will resolve/reject.
}
async function main() {
if (process.env.FUNCTION === 'api') {
api()
} else if (process.env.FUNCTION === 'worker') {
worker()
} else {
throw new Error('process.env.FUNCTION must be either api or worker. got '+process.env.FUNCTION)
}
}
main().catch((err) => {
console.error(err);
process.exit(1);
});

View File

@@ -1,80 +0,0 @@
'use strict'
import { build } from './app.js'
import 'dotenv/config'
import { run } from 'graphile-worker'
import { dirname } from 'node:path';
import { fileURLToPath } from 'url';
const __dirname = dirname(fileURLToPath(import.meta.url));
if (!process.env.DATABASE_URL) throw new Error('DATABASE_URL is missing in env');
if (!process.env.FUNCTION) throw new Error(`FUNCTION env var was missing. FUNCTION env var must be either 'api' or 'worker'.`);
const connectionString = process.env.DATABASE_URL!
async function api() {
if (!process.env.PORT) throw new Error('PORT is missing in env');
const PORT = parseInt(process.env.PORT!)
const fastifyOpts = {
logger: {
level: 'info',
transport: {
target: 'pino-pretty'
}
}
}
const server = build(fastifyOpts, connectionString)
server.listen({ port: PORT }, (err) => {
if (err) {
server.log.error(err)
process.exit(1)
}
})
}
async function worker() {
if (!process.env.WORKER_CONCURRENCY) throw new Error('WORKER_CONCURRENCY is missing in env');
const concurrency = (process.env?.WORKER_CONCURRENCY) ? parseInt(process.env.WORKER_CONCURRENCY) : 1
// Run a worker to execute jobs:
const runner = await run({
connectionString: process.env.DATABASE_URL!,
concurrency,
// Install signal handlers for graceful shutdown on SIGINT, SIGTERM, etc
noHandleSignals: false,
pollInterval: 1000,
taskDirectory: `${__dirname}/tasks`,
});
// Immediately await (or otherwise handle) the resulting promise, to avoid
// "unhandled rejection" errors causing a process crash in the event of
// something going wrong.
await runner.promise;
// If the worker exits (whether through fatal error or otherwise), the above
// promise will resolve/reject.
}
async function main() {
if (process.env.FUNCTION === 'worker') {
worker()
} else if (process.env.FUNCTION === 'api') {
api()
} else {
console.error(`FUNCTION environment variable must be 'worker' or 'api', but it was set to ${process.env.FUNCTION}`)
}
}
main().catch((err) => {
console.error(err);
process.exit(1);
});

View File

@@ -1,41 +0,0 @@
'use strict'
import { run } from 'graphile-worker'
import { dirname } from 'node:path';
import { fileURLToPath } from 'url';
const __dirname = dirname(fileURLToPath(import.meta.url));
if (!process.env.DATABASE_URL) throw new Error('DATABASE_URL is undefined in env');
const concurrency = (process.env?.WORKER_CONCURRENCY) ? parseInt(process.env.WORKER_CONCURRENCY) : 1
async function main() {
// Run a worker to execute jobs:
const runner = await run({
connectionString: process.env.DATABASE_URL!,
concurrency,
// Install signal handlers for graceful shutdown on SIGINT, SIGTERM, etc
noHandleSignals: false,
pollInterval: 1000,
taskDirectory: `${__dirname}/tasks`,
});
// Immediately await (or otherwise handle) the resulting promise, to avoid
// "unhandled rejection" errors causing a process crash in the event of
// something going wrong.
await runner.promise;
// If the worker exits (whether through fatal error or otherwise), the above
// promise will resolve/reject.
}
main().catch((err) => {
console.error(err);
process.exit(1);
});

View File

@@ -0,0 +1,25 @@
import { createWriteStream } from 'node:fs'
import ffmpeg from 'fluent-ffmpeg'
// test stream from https://ottverse.com/free-hls-m3u8-test-urls/
const playlistUrl = 'https://demo.unified-streaming.com/k8s/features/stable/video/tears-of-steel/tears-of-steel.ism/.m3u8'
const fileOutputStream = createWriteStream('/tmp/test-stream.ts')
ffmpeg()
.input(playlistUrl)
.audioCodec('copy')
.videoCodec('copy')
.addOutputOptions('-movflags faststart')
.output(fileOutputStream)
.format('mpegts')
.on('end', () => {
console.log('Finished');
})
.on('error', (err, stdout, stderr) => {
console.error(`there was an error`);
console.error(err);
console.error(stdout);
console.error(stderr);
throw new Error(err.message);
})
.run();

View File

@@ -0,0 +1,135 @@
import { PassThrough, pipeline, Readable } from "stream";
import { type Progress, Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";
import { createReadStream, createWriteStream } from 'fs';
import { createId } from '@paralleldrive/cuid2';
import prettyBytes from 'pretty-bytes';
import dotenv from 'dotenv'
dotenv.config({
path: '../../.env.development'
})
if (!process.env.S3_BUCKET_NAME) throw new Error('S3_BUCKET_NAME missing in env');
if (!process.env.S3_BUCKET_KEY_ID) throw new Error('S3_BUCKET_KEY_ID missing in env');
if (!process.env.S3_BUCKET_APPLICATION_KEY) throw new Error('S3_BUCKET_APPLICATION_KEY missing in env');
async function uploadStream(client: S3Client, stream: NodeJS.ReadableStream, keyName: string) {
// const pass = new PassThrough()
// Create a stream to the S3 bucket. We use this stream to upload the livestream to the Backblaze S3 service
console.log(`keyName=${keyName}`)
const target = {
Bucket: process.env.S3_BUCKET_NAME!,
Key: keyName,
Body: new Readable().wrap(stream)
}
console.log(target)
// greets https://stackoverflow.com/a/70159394/1004931
try {
const parallelUploads3 = new Upload({
client: client,
partSize: 1024 * 1024 * 5,
// tags: [...], // optional tags
queueSize: 1, // optional concurrency configuration
leavePartsOnError: false, // optional manually handle dropped parts
params: target,
});
parallelUploads3.on("httpUploadProgress", (progress) => {
if (progress?.loaded) {
console.log(`loaded ${progress.loaded} bytes (${prettyBytes(progress.loaded)})`);
} else {
console.log(`httpUploadProgress ${JSON.stringify(progress, null, 2)}`)
}
});
const res = await parallelUploads3.done();
return res
} catch (e) {
if (e instanceof Error) {
console.error(`while uploading a file to s3, we encountered an error`)
throw new Error(e.message);
} else {
throw new Error(`error of some sort ${JSON.stringify(e, null, 2)}`)
}
}
}
async function main() {
try {
const client = new S3Client({
endpoint: 'https://s3.us-west-000.backblazeb2.com',
region: 'us-west-000',
credentials: {
accessKeyId: process.env.S3_BUCKET_KEY_ID!,
secretAccessKey: process.env.S3_BUCKET_APPLICATION_KEY!
}
});
// let debugCounter = 0
// let uploadStream = new PassThrough()
// uploadStream.on('data', (data) => {
// debugCounter += data.length
// console.log(`[data] uploadStream. ${debugCounter} aggregated bytes (${prettyBytes(debugCounter)}).`)
// })
// uploadStream.on('drain', () => {
// console.log('[drain] uploadStream')
// })
// uploadStream.on('close', () => {
// console.log(`[close] uploadStream closed`)
// })
// uploadStream.on('error', (err) => {
// console.error('[error] uploadStream had an error as follows')
// console.error(err)
// })
// uploadStream.on('exit', (code) => {
// console.log(`[exit] uploadStream exited with code ${code}`)
// })
// uploadStream.on('disconnect', () => {
// console.log('[disconnect] uploadStream disconnected')
// })
// uploadStream.on('message', (msg) => {
// console.log('[message] uploadStream sent a message as follows')
// console.log(msg)
// })
const fileStream = createReadStream('/home/cj/Downloads/stream-23894234.ts')
const datestamp = new Date().toISOString()
const keyName = `${datestamp}-stream3-chaturbate-${createId()}.ts`
await uploadStream(client, fileStream, keyName)
// // we set up a pipeline with a readable stream (ffmpeg), a transform stream (debug), and a writable stream (the s3 Upload)
// pipeline(
// fileStream,
// uploadStream,
// (err) => {
// if (err) {
// console.error(`pipeline errored.`)
// console.error(err)
// } else {
// console.log('pipeline succeeded.')
// }
// }
// )
} catch (e) {
if (e instanceof Error) {
console.error(`Eyy lookat me, I'm a big nastry try/catch block and I did my jorb!`)
console.error(e)
} else {
console.error('error of some sort')
console.error(e)
}
}
}
main()

View File

@@ -0,0 +1,122 @@
import { PassThrough, pipeline, Readable } from "stream";
import { Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";
import { createReadStream } from 'fs';
import { createId } from '@paralleldrive/cuid2';
import prettyBytes from 'pretty-bytes';
import dotenv from 'dotenv'
dotenv.config({
path: '../../.env.development'
})
if (!process.env.S3_BUCKET_NAME) throw new Error('S3_BUCKET_NAME missing in env');
if (!process.env.S3_BUCKET_KEY_ID) throw new Error('S3_BUCKET_KEY_ID missing in env');
if (!process.env.S3_BUCKET_APPLICATION_KEY) throw new Error('S3_BUCKET_APPLICATION_KEY missing in env');
function makeProgressTicker(getCounter: () => number) {
// take a getter so the ticker reads the live byte count; a plain number
// parameter is copied by value, so the log would show 0 forever
const ticker = setInterval(() => {
const counter = getCounter()
console.log(`[progress] ${counter} bytes (aggregate) (${prettyBytes(counter)}) have passed through the pipeline.`)
}, 1000 * 30)
return ticker
}
function makeS3Client() {
const client = new S3Client({
endpoint: 'https://s3.us-west-000.backblazeb2.com',
region: 'us-west-000',
credentials: {
accessKeyId: process.env.S3_BUCKET_KEY_ID!,
secretAccessKey: process.env.S3_BUCKET_APPLICATION_KEY!
}
})
return client
}
async function uploadToS3({ client, uploadStream, keyName }: { client: S3Client, uploadStream: NodeJS.ReadableStream, keyName: string }) {
const target = {
Bucket: process.env.S3_BUCKET_NAME!,
Key: keyName,
Body: new Readable().wrap(uploadStream)
}
// greets https://stackoverflow.com/a/70159394/1004931
try {
const parallelUploads3 = new Upload({
client: client,
partSize: 1024 * 1024 * 5,
queueSize: 1,
leavePartsOnError: false,
params: target,
});
parallelUploads3.on("httpUploadProgress", (progress) => {
console.log(progress)
if (progress?.loaded) {
console.log(`loaded ${progress.loaded} bytes (${prettyBytes(progress.loaded)})`);
} else {
console.log(`httpUploadProgress ${JSON.stringify(progress, null, 2)}`)
}
});
await parallelUploads3.done();
} catch (e) {
if (e instanceof Error) {
console.error(`while uploading a file to s3, we encountered an error`)
throw new Error(e.message);
} else {
throw new Error(`error of some sort ${JSON.stringify(e, null, 2)}`)
}
}
}
async function main() {
let counter = 0
const client = makeS3Client()
const ticker = makeProgressTicker(() => counter)
const datestamp = new Date().toISOString()
const keyName = `${datestamp}-stream3-chaturbate-${createId()}.ts`
console.log(`Uploading ${keyName} to S3`)
/**
* setup the streams which process the data
*/
const ffmpegStream = createReadStream('/home/cj/Downloads/stream-23894234.ts')
const uploadStream = new PassThrough()
// update the progress ticker data
uploadStream.on('data', (data) => {
counter += data.length
})
/**
* we set up a pipeline with a readable stream (ffmpeg), a transform stream (debug), and a writable stream (the s3 Upload)
*/
pipeline(
ffmpegStream,
uploadStream,
(err) => {
if (err) {
console.error(`pipeline errored.`)
console.error(err)
} else {
console.log('pipeline succeeded.')
}
}
)
await uploadToS3({client, uploadStream, keyName })
clearInterval(ticker)
}
main()

178 packages/capture/src/poc.ts Normal file
View File

@@ -0,0 +1,178 @@
/**
*
* @todo if we have the ffmpeg stream send an end event, does the ffmpegStream close?
* so far, we have observed the end of a CB stream, and the uploadStream is what shows as closed.
* It would be better to have the ffmpeg stream do the closing, amirite? or does it even matter?
* Here's what the console.log shows when the CB stream ended while we were not using { end: true } on the ffmpeg stream
*
*
* [data] uploadStream. 118018880 aggregated bytes (118 MB).
[data] uploadStream. 118067384 aggregated bytes (118 MB).
[data] uploadStream. 118101224 aggregated bytes (118 MB).
[data] uploadStream. 118119648 aggregated bytes (118 MB).
[close] uploadStream closed
pipeline succeeded.
*/
import { getRandomRoom } from '@futureporn/scout/cb.js'
import { ua0 } from "@futureporn/scout/ua.js";
import { spawn } from "child_process";
import { PassThrough, pipeline } from "stream";
import { type Progress, Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";
import { createWriteStream } from 'fs';
import ffmpeg from 'fluent-ffmpeg'
import { createId } from '@paralleldrive/cuid2';
import prettyBytes from 'pretty-bytes';
import dotenv from 'dotenv'
dotenv.config({
path: '../../.env.development'
})
if (!process.env.S3_BUCKET_NAME) throw new Error('S3_BUCKET_NAME missing in env');
if (!process.env.S3_BUCKET_KEY_ID) throw new Error('S3_BUCKET_KEY_ID missing in env');
if (!process.env.S3_BUCKET_APPLICATION_KEY) throw new Error('S3_BUCKET_APPLICATION_KEY missing in env');
async function main() {
const client = new S3Client({
endpoint: 'https://s3.us-west-000.backblazeb2.com',
region: 'us-west-000',
credentials: {
accessKeyId: process.env.S3_BUCKET_KEY_ID!,
secretAccessKey: process.env.S3_BUCKET_APPLICATION_KEY!
}
});
const randomRoom = await getRandomRoom()
console.log(`randomRoom=${randomRoom.name}`)
const playlistUrl: string = await new Promise((resolve, reject) => {
// get the m3u8 playlist for the livestream
const ytdlp = spawn('yt-dlp', [ '-g', randomRoom.url ])
let output = ''
ytdlp.on('error', (err) => {
console.error(err)
})
ytdlp.once('exit', (code) => {
console.log(`code=${code}`)
if (code !== 0) reject(`yt-dlp exited with code ${code}. stderr as follows ${JSON.stringify(ytdlp.stderr, null, 2)}`);
resolve(output)
})
ytdlp.stderr.on('data', (data) => {
console.error('stderr data as follows')
console.error(data.toString())
})
ytdlp.stdout.on('data', (data) => {
output = data
})
})
console.log(`playlistUrl=${playlistUrl}`)
if (!playlistUrl) throw new Error(`failed to get playlistUrl from yt-dlp -g ${randomRoom.url}`);
let debugCounter = 0
let fileOutputStream = createWriteStream('/home/cj/Downloads/outputfile.ts');
// let ffmpegLogStream = createWriteStream('/tmp/ffmpeg-log.txt')
let uploadStream = new PassThrough()
uploadStream.on('data', (data) => {
debugCounter += data.length
console.log(`[data] uploadStream. ${debugCounter} aggregated bytes (${prettyBytes(debugCounter)}).`)
})
uploadStream.on('drain', () => {
console.log('[drain] uploadStream')
})
uploadStream.on('close', () => {
console.log(`[close] uploadStream closed`)
})
uploadStream.on('error', (err) => {
console.error('[error] uploadStream had an error as follows')
console.error(err)
})
uploadStream.on('exit', (code) => {
console.log(`[exit] uploadStream exited with code ${code}`)
})
uploadStream.on('disconnect', () => {
console.log('[disconnect] uploadStream disconnected')
})
uploadStream.on('message', (msg) => {
console.log('[message] uploadStream sent a message as follows')
console.log(msg)
})
const datestamp = new Date().toISOString()
const ffmpegProc = spawn('ffmpeg', [
'-headers', `"User-Agent: ${ua0}"`,
'-i', playlistUrl,
'-c:v', 'copy',
'-c:a', 'copy',
'-movflags', 'faststart',
'-y',
'-f', 'mpegts',
'-loglevel', 'quiet',
'pipe:1'
], {
// ignoring stderr is important; otherwise ffmpeg fills that buffer and Node hangs
stdio: ['pipe', 'pipe', 'ignore']
})
// we set up a pipeline with a readable stream (ffmpeg), a transform stream (debug), and a writable stream (the s3 Upload)
pipeline(
ffmpegProc.stdout,
uploadStream,
(err) => {
if (err) {
console.error(`pipeline errored.`)
console.error(err)
} else {
console.log('pipeline succeeded.')
}
}
)
// Create a stream to the S3 bucket. We use this stream to upload the livestream to the Backblaze S3 service
const keyName = `${datestamp}-${randomRoom.name}-chaturbate-${createId()}.ts`
console.log(`keyName=${keyName}`)
const target = {
Bucket: process.env.S3_BUCKET_NAME,
Key: keyName,
Body: uploadStream
}
// greets https://stackoverflow.com/a/70159394/1004931
try {
const parallelUploads3 = new Upload({
client: client,
//tags: [...], // optional tags
queueSize: 4, // optional concurrency configuration
leavePartsOnError: false, // optional manually handle dropped parts
params: target,
});
await parallelUploads3.done();
} catch (e) {
if (e instanceof Error) {
console.error(`while uploading a file to s3, we encountered an error`)
throw new Error(e.message);
} else {
throw new Error(`error of some sort ${JSON.stringify(e, null, 2)}`)
}
}
}
main()

View File

@@ -1,17 +1,38 @@
import { type Helpers } from 'graphile-worker'
import Record from '../Record.ts'
import 'dotenv/config'
if (!process.env.S3_BUCKET_NAME) throw new Error('S3_BUCKET_NAME was undefined in env');
if (!process.env.S3_ENDPOINT) throw new Error('S3_ENDPOINT was undefined in env');
if (!process.env.S3_REGION) throw new Error('S3_REGION was undefined in env');
if (!process.env.S3_ACCESS_KEY_ID) throw new Error('S3_ACCESS_KEY_ID was undefined in env');
if (!process.env.S3_SECRET_ACCESS_KEY) throw new Error('S3_SECRET_ACCESS_KEY was undefined in env');
type Payload = {
url: string
url: string;
channel: string;
}
export default async function (payload: Payload, helpers: Helpers) {
const { url } = payload;
helpers.logger.info(`'record' task execution begin with url=${url} (@todo implement)`);
const record = new Record({ url: 'https://example.com/stream' })
record.start()
export default async function (payload: Payload, helpers: Helpers): Promise<string> {
const { url, channel } = payload;
helpers.logger.info(`'record' task execution begin with url=${url}, channel=${channel}`);
return record.id
const bucket = process.env.S3_BUCKET_NAME!
const endpoint = process.env.S3_ENDPOINT!
const region = process.env.S3_REGION!
const accessKeyId = process.env.S3_ACCESS_KEY_ID!
const secretAccessKey = process.env.S3_SECRET_ACCESS_KEY!
const s3Client = Record.makeS3Client({ accessKeyId, secretAccessKey, region, endpoint })
const inputStream = Record.getFFmpegDownload({ url })
const record = new Record({ inputStream, bucket, s3Client, channel })
await record.start()
return record.id
};

View File

@@ -1,16 +1,26 @@
import { describe } from 'mocha'
import { expect } from 'chai';
import { getInitialRoomDossier } from './cb.js'
import { getInitialRoomDossier, getRandomRoom } from './cb.js'
describe('cb', function () {
describe('getInitialRoomDossier', function () {
/**
* this is an integration test that fails in CI due to CB blocking IP ranges
* @todo use a proxy or something
*/
xit('should return json', async function () {
const dossier = await getInitialRoomDossier('https://chaturbate.com/projektmelody')
expect(dossier).to.have.property('wschat_host')
})
describe('getInitialRoomDossier', function () {
/**
* this is an integration test that fails in CI due to CB blocking IP ranges
* @todo use a proxy or something
*/
xit('should return json', async function () {
const dossier = await getInitialRoomDossier('https://chaturbate.com/projektmelody')
expect(dossier).to.have.property('wschat_host')
})
})
describe('getRandomRoom', function () {
it('should return a Room object of an online room', async function () {
this.timeout(1000*60*2)
const room = await getRandomRoom()
expect(room).to.have.property('url')
expect(room).to.have.property('name')
expect(room.name).to.match(/[a-z_]/)
expect(room.url).to.match(/https:\/\//)
})
})
})

View File

@@ -1,4 +1,42 @@
import * as cheerio from 'cheerio'
import fetch from 'node-fetch'
export interface ChaturbateModel {
gender: string;
location: string;
current_show: 'public' | 'private';
username: string;
room_subject: string;
tags: string[];
is_new: boolean;
num_users: number;
num_followers: number;
country: string;
spoken_languages: string;
display_name: string;
birthday: string;
is_hd: boolean;
age: number;
seconds_online: number;
image_url: string;
image_url_360x270: string;
chat_room_url_revshare: string;
iframe_embed_revshare: string;
chat_room_url: string;
iframe_embed: string;
slug: string;
}
export interface ChaturbateOnlineModelsResponse {
results: ChaturbateModel[],
count: number
}
export interface Room {
name: string;
url: string;
}
/**
*
@@ -6,29 +44,62 @@ import * as cheerio from 'cheerio'
* @returns {Object} initialRoomDossier
*/
export async function getInitialRoomDossier(roomUrl: string) {
try {
const res = await fetch(roomUrl, {
headers: {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
}
});
const body = await res.text()
const $ = cheerio.load(body);
let rawScript = $('script:contains(window.initialRoomDossier)').html();
if (!rawScript) {
throw new Error('window.initialRoomDossier is null. This could mean the channel is in password mode');
try {
const res = await fetch(roomUrl, {
headers: {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
}
let rawDossier = rawScript.slice(rawScript.indexOf('"'), rawScript.lastIndexOf('"') + 1);
let dossier = JSON.parse(JSON.parse(rawDossier));
});
const body = await res.text()
const $ = cheerio.load(body);
let rawScript = $('script:contains(window.initialRoomDossier)').html();
if (!rawScript) {
throw new Error('window.initialRoomDossier is null. This could mean the channel is in password mode');
}
let rawDossier = rawScript.slice(rawScript.indexOf('"'), rawScript.lastIndexOf('"') + 1);
let dossier = JSON.parse(JSON.parse(rawDossier));
return dossier;
} catch (error) {
if (error instanceof Error) {
// Handle the error gracefully
console.error(`Error fetching initial room dossier: ${error.message}`);
return null; // Or any other appropriate action you want to take
} else {
console.error('caught an exotic error, uh-oh')
}
return dossier;
} catch (error) {
if (error instanceof Error) {
// Handle the error gracefully
console.error(`Error fetching initial room dossier: ${error.message}`);
return null; // Or any other appropriate action you want to take
} else {
console.error('caught an exotic error, uh-oh')
}
}
}
export async function getRandomRoom(): Promise<Room> {
try {
const res = await fetch('https://chaturbate.com/api/public/affiliates/onlinerooms/?wm=DiPkB&client_ip=request_ip');
const data = await res.json() as ChaturbateOnlineModelsResponse;
if (!data || !Array.isArray(data.results) || data.results.length === 0) {
throw new Error('No results found');
}
const results = data.results;
const randomIndex = Math.floor(Math.random() * results.length);
if (!results[randomIndex]) {
throw new Error('No result found at random index');
}
const username = results[randomIndex].username;
return {
url: `https://chaturbate.com/${username}`,
name: username
}
} catch (error) {
if (error instanceof Error) {
console.error(`Error in getRandomRoom: ${error.message}`);
} else {
console.error('An unexpected error occurred');
}
throw error; // Re-throw the error to propagate it further
}
}

View File

@@ -43,40 +43,6 @@ importers:
specifier: ^4.16.2
version: 4.16.2
../..: {}
../bot: {}
../capture: {}
../image: {}
../infra: {}
../mailbox: {}
../meal: {}
../next: {}
../old: {}
../scout: {}
../storage: {}
../strapi: {}
../taco: {}
../types: {}
../uppy: {}
../video: {}
../worker: {}
packages:
'@cspotcode/source-map-support@0.8.1':

View File

@@ -10,7 +10,7 @@ import { dirname, basename, join, isAbsolute } from 'node:path';
import { fileURLToPath } from 'url';
export const __filename = fileURLToPath(import.meta.url);
export const __dirname = dirname(fileURLToPath(import.meta.url));
const __dirname = dirname(fileURLToPath(import.meta.url));
export function getPackageVersion(packageJsonPath: string): string {
if (!isAbsolute(packageJsonPath)) {

View File

@@ -39,7 +39,9 @@ EOF
kubectl --namespace futureporn delete secret capture --ignore-not-found
kubectl --namespace futureporn create secret generic capture \
--from-literal=databaseUrl=${WORKER_DATABASE_URL}
--from-literal=databaseUrl=${WORKER_DATABASE_URL} \
--from-literal=s3AccessKeyId=${S3_ACCESS_KEY_ID} \
--from-literal=s3SecretAccessKey=${S3_SECRET_ACCESS_KEY}
kubectl --namespace futureporn delete secret mailbox --ignore-not-found
kubectl --namespace futureporn create secret generic mailbox \

View File

@@ -1,3 +1,12 @@
if [ -z "$POSTGRES_PASSWORD" ]; then
echo "POSTGRES_PASSWORD was missing in env"
exit 5
fi
## drop futureporn_db
kubectl -n futureporn exec postgres-primary-0 -- psql -U postgres --command "DROP DATABASE futureporn_db WITH (FORCE);"
kubectl -n futureporn exec postgresql-primary-0 -- env PGPASSWORD=${POSTGRES_PASSWORD} psql -U postgres --command "DROP DATABASE futureporn_db WITH (FORCE);"
## drop graphile_worker
kubectl -n futureporn exec postgresql-primary-0 -- env PGPASSWORD=${POSTGRES_PASSWORD} psql -U postgres --command "DROP DATABASE graphile_worker WITH (FORCE);"