fp/services/capture/src/poc-s3-alt.ts

import { PassThrough, pipeline, Readable } from "stream";
import { type Progress, Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";
import { createReadStream, createWriteStream } from "fs";
import { createId } from "@paralleldrive/cuid2";
import prettyBytes from "pretty-bytes";
import dotenv from "dotenv";

dotenv.config({
  path: "../../.env.development",
});

// Fail fast if any of the required S3 credentials are missing from the environment.
if (!process.env.S3_BUCKET_NAME) throw new Error("S3_BUCKET_NAME missing in env");
if (!process.env.S3_BUCKET_KEY_ID) throw new Error("S3_BUCKET_KEY_ID missing in env");
if (!process.env.S3_BUCKET_APPLICATION_KEY) throw new Error("S3_BUCKET_APPLICATION_KEY missing in env");
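
/**
 * Upload a readable stream to the S3 bucket named in S3_BUCKET_NAME under the given key.
 * Resolves with the response of the completed upload.
 */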
async function uploadStream(client: S3Client, stream: NodeJS.ReadableStream, keyName: string) {
  // const pass = new PassThrough()
  // Wrap the incoming stream and point it at the S3 bucket. This is the stream we use to
  // push the captured livestream up to Backblaze's S3-compatible service.
  console.log(`keyName=${keyName}`);
  const target = {
    Bucket: process.env.S3_BUCKET_NAME!,
    Key: keyName,
    Body: new Readable().wrap(stream),
  };
  console.log(target);
  // greets https://stackoverflow.com/a/70159394/1004931
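  // Upload from @aws-sdk/lib-storage performs a managed multipart upload:
  // it buffers the stream into parts of partSize and sends them with queueSize concurrency.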
  try {
    const parallelUploads3 = new Upload({
      client: client,
      partSize: 1024 * 1024 * 5,
      // tags: [...], // optional tags
      queueSize: 1, // optional concurrency configuration
      leavePartsOnError: false, // optional manually handle dropped parts
      params: target,
    });
    parallelUploads3.on("httpUploadProgress", (progress) => {
      if (progress?.loaded) {
        console.log(`loaded ${progress.loaded} bytes (${prettyBytes(progress.loaded)})`);
      } else {
        console.log(`httpUploadProgress ${JSON.stringify(progress, null, 2)}`);
      }
    });
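    // done() resolves once the whole upload has completed, or rejects if it fails.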
    const res = await parallelUploads3.done();
    return res;
  } catch (e) {
    if (e instanceof Error) {
      console.error(`error while uploading ${keyName} to S3`);
      throw new Error(e.message);
    } else {
      throw new Error(`non-Error thrown during upload: ${JSON.stringify(e, null, 2)}`);
    }
  }
}
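
/**
 * PoC entrypoint: read a local capture file and upload it to Backblaze B2
 * through its S3-compatible API.
 */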
async function main() {
  try {
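    // Backblaze B2 speaks the S3 API; point the AWS SDK client at the bucket's regional endpoint.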
    const client = new S3Client({
      endpoint: "https://s3.us-west-000.backblazeb2.com",
      region: "us-west-000",
      credentials: {
        accessKeyId: process.env.S3_BUCKET_KEY_ID!,
        secretAccessKey: process.env.S3_BUCKET_APPLICATION_KEY!,
      },
    });
    // let debugCounter = 0
    // let uploadStream = new PassThrough()
    // uploadStream.on('data', (data) => {
    //   debugCounter += data.length
    //   console.log(`[data] uploadStream. ${debugCounter} aggregated bytes (${prettyBytes(debugCounter)}).`)
    // })
    // uploadStream.on('drain', () => {
    //   console.log('[drain] uploadStream')
    // })
    // uploadStream.on('close', () => {
    //   console.log(`[close] uploadStream closed`)
    // })
    // uploadStream.on('error', (err) => {
    //   console.error('[error] uploadStream had an error as follows')
    //   console.error(err)
    // })
    // uploadStream.on('exit', (code) => {
    //   console.log(`[exit] uploadStream exited with code ${code}`)
    // })
    // uploadStream.on('disconnect', () => {
    //   console.log('[disconnect] uploadStream disconnected')
    // })
    // uploadStream.on('message', (msg) => {
    //   console.log('[message] uploadStream sent a message as follows')
    //   console.log(msg)
    // })
    const fileStream = createReadStream("/home/cj/Downloads/stream-23894234.ts");
    const datestamp = new Date().toISOString();
    const keyName = `${datestamp}-stream3-chaturbate-${createId()}.ts`;
    await uploadStream(client, fileStream, keyName);
    // // we set up a pipeline which has a readable stream (ffmpeg), a transform stream (debug), and a writable stream (s3 Upload)
    // pipeline(
    //   fileStream,
    //   uploadStream,
    //   (err) => {
    //     if (err) {
    //       console.error(`pipeline errored.`)
    //       console.error(err)
    //     } else {
    //       console.log('pipeline succeeded.')
    //     }
    //   }
    // )
  } catch (e) {
    if (e instanceof Error) {
      console.error(`caught an error in main()`);
      console.error(e);
    } else {
      console.error("caught a non-Error value in main()");
      console.error(e);
    }
  }
}

main();