create migrations-data
This commit is contained in:
parent 140883a69c
commit 5e83742341

Tiltfile: 243
@@ -188,13 +188,13 @@ k8s_yaml(helm(
# ## before you think of switching to valkey, dragonfly, or one of the other redis alternatives, STOP. Uppy is picky.
# ## I tested dragonfly, valkey, and KeyDB. Uppy's ioredis client was unable to connect. "ECONNREFUSED" ...
# ## Uppy was only happy connecting to official redis.
k8s_yaml(helm(
  './charts/redis/redis',
  namespace='futureporn',
  values=[
    './charts/redis/values-overrides.yaml'
  ]
))
# k8s_yaml(helm(
#   './charts/redis/redis',
#   namespace='futureporn',
#   values=[
#     './charts/redis/values-overrides.yaml'
#   ]
# ))

k8s_yaml(helm(
  './charts/cert-manager/cert-manager',
@@ -290,33 +290,33 @@ cmd_button('postgres:drop',
  icon_name='delete',
  text='DROP all databases'
)
cmd_button('postgres:refresh',
cmd_button('migrations-schema:refresh',
  argv=['echo', '@todo please restart postgrest container manually.'],
  resource='migrations',
  resource='migrations-schema',
  icon_name='refresh',
  text='Refresh schema cache'
)

## @todo let's make this get a random room from scout then use the random room to record via POST /recordings
cmd_button('capture-worker:create',
  argv=['./scripts/capture-integration.sh'],
  resource='capture-worker',
  icon_name='send',
  text='Recording Integration Test'
cmd_button('migrations-data:refresh',
  argv=['echo', '@todo please restart postgrest container manually.'],
  resource='migrations-data',
  icon_name='refresh',
  text='Refresh schema cache'
)
# cmd_button('drupal:init',
#   argv=['./scripts/drupal-init-wrapper.sh'],
#   resource='drupal',
## @todo let's make this get a random room from scout then use the random room to record via POST /recordings
# cmd_button('capture-worker:create',
#   argv=['./scripts/capture-integration.sh'],
#   resource='capture-worker',
#   icon_name='send',
#   text='Initialize Drupal'
#   text='Recording Integration Test'
# )
# k8s_resource(
#   workload='capture-worker',
#   labels=['backend'],
#   resource_deps=['postgrest', 'postgresql-primary'],
# )

cmd_button('postgres:migrate',
  argv=['./scripts/postgres-migrations.sh'],
  resource='postgresql-primary',
  icon_name='directions_run',
  text='Run migrations',
)


cmd_button('pgadmin4:restore',
  argv=['./scripts/pgadmin-import-connection.sh'],
@@ -335,17 +335,31 @@ cmd_button('build:test',
## instead of being invoked by helm, we start a container using this image manually via Tilt UI
# update_settings(suppress_unused_image_warnings=["fp/migrations"])
docker_build(
  'fp/migrations',
  'fp/migrations-schema',
  '.',
  dockerfile='dockerfiles/migrations.dockerfile',
  target='migrations',
  dockerfile='dockerfiles/migrations-schema.dockerfile',
  target='migrations-schema',
  pull=False,
  only=[
    './.npmrc',
    './package.json',
    './pnpm-lock.yaml',
    './pnpm-workspace.yaml',
    './services/migrations'
    './services/migrations-schema'
  ],
)
docker_build(
  'fp/migrations-data',
  '.',
  dockerfile='dockerfiles/migrations-data.dockerfile',
  target='migrations-data',
  pull=False,
  only=[
    './.npmrc',
    './package.json',
    './pnpm-lock.yaml',
    './pnpm-workspace.yaml',
    './services/migrations-data'
  ],
)

@@ -362,16 +376,7 @@ docker_build(
  pull=False,
)

docker_build(
  'fp/factory',
  '.',
  dockerfile='./dockerfiles/factory.dockerfile',
  target='dev',
  live_update=[
    sync('./services/factory', '/app/services/factory')
  ],
  pull=False,
)


@@ -404,46 +409,46 @@ docker_build(


docker_build(
  'fp/capture',
  '.',
  dockerfile='dockerfiles/capture.dockerfile',
  target='dev',
  only=[
    './.npmrc',
    './package.json',
    './pnpm-lock.yaml',
    './pnpm-workspace.yaml',
    './packages/types',
    './packages/utils',
    './packages/fetchers',
    './services/capture',
  ],
  live_update=[
    sync('./services/capture', '/app/services/capture'),
  ],
  pull=False,
)
# docker_build(
#   'fp/capture',
#   '.',
#   dockerfile='dockerfiles/capture.dockerfile',
#   target='dev',
#   only=[
#     './.npmrc',
#     './package.json',
#     './pnpm-lock.yaml',
#     './pnpm-workspace.yaml',
#     './packages/types',
#     './packages/utils',
#     './packages/fetchers',
#     './services/capture',
#   ],
#   live_update=[
#     sync('./services/capture', '/app/services/capture'),
#   ],
#   pull=False,
# )


k8s_resource(
  workload='scout',
  resource_deps=['postgresql-primary'],
  # port_forwards=['8134'],
  labels=['backend'],
)
k8s_resource(
  workload='uppy',
  links=[
    link('https://uppy.fp.sbtp.xyz'),
  ],
  resource_deps=['redis-master'],
  labels=['backend'],
)
# k8s_resource(
#   workload='scout',
#   resource_deps=['postgresql-primary'],
#   # port_forwards=['8134'],
#   labels=['backend'],
# )
# k8s_resource(
#   workload='uppy',
#   links=[
#     link('https://uppy.fp.sbtp.xyz'),
#   ],
#   resource_deps=['redis-master'],
#   labels=['backend'],
# )
k8s_resource(
  workload='next',
  links=[
@@ -473,58 +478,19 @@ k8s_resource(
  labels=['database']
)

# k8s_resource(
#   workload='mariadb',
#   labels=['database']
# )
# k8s_resource(
#   workload='drupal',
#   resource_deps=['mariadb'],
#   labels=['backend'],
#   port_forwards=['9797:8080'],
#   links=[
#     link('https://drupal.fp.sbtp.xyz'),
#   ],
# )
k8s_resource(
  workload='chart-velero',
  resource_deps=['postgresql-primary'],
  labels=['backend'],
)
k8s_resource(
  workload='chart-velero-upgrade-crds',
  resource_deps=['postgresql-primary'],
  labels=['backend'],
)

# k8s_resource(
#   workload='logto',
#   port_forwards=['3001', '3002'],
#   links=[
#     link('https://logto.fp.sbtp.xyz'),
#     link('https://logto-admin.fp.sbtp.xyz'),
#   ],
#   workload='chart-velero',
#   resource_deps=['postgresql-primary'],
#   labels=['backend'],
# )
# k8s_resource(
#   workload='logto-database-seed',
#   labels=['database'],
# )
# k8s_resource(
#   workload='phpmyadmin',
#   port_forwards=['5151:8080'],
#   labels=['database'],
# )

# k8s_resource(
#   workload='supertokens',
#   links=[
#     link('https://supertokens.fp.sbtp.xyz'),
#   ],
#   workload='chart-velero-upgrade-crds',
#   resource_deps=['postgresql-primary'],
#   labels=['backend'],
# )

k8s_resource(
  workload='keycloak',
  links=[
@@ -583,21 +549,27 @@ k8s_resource(
  workload='factory',
  labels=['backend'],
)
docker_build(
  'fp/factory',
  '.',
  dockerfile='./dockerfiles/factory.dockerfile',
  target='dev',
  live_update=[
    sync('./services/factory', '/app/services/factory')
  ],
  pull=False,
)

# k8s_resource(
#   workload='redis-master',
#   labels=['cache']
# )
# k8s_resource(
#   workload='bot',
#   labels=['backend'],
#   resource_deps=['postgrest'],
# )

k8s_resource(
  workload='redis-master',
  labels=['cache']
)
k8s_resource(
  workload='bot',
  labels=['backend'],
  resource_deps=['postgrest'],
)
k8s_resource(
  workload='capture-worker',
  labels=['backend'],
  resource_deps=['postgrest', 'postgresql-primary'],
)
# k8s_resource(
#   workload='chihaya',
#   labels=['backend']
@@ -625,7 +597,12 @@ k8s_resource(
  labels=['database'],
)
k8s_resource(
  workload='migrations',
  workload='migrations-schema',
  labels=['database'],
  resource_deps=['postgresql-primary'],
)
k8s_resource(
  workload='migrations-data',
  labels=['database'],
  resource_deps=['postgresql-primary'],
)

@@ -3,14 +3,14 @@
apiVersion: v1
kind: Pod
metadata:
  name: migrations
  name: migrations-data
  namespace: futureporn
  labels:
    app.kubernetes.io/name: migrations
    app.kubernetes.io/name: migrations-data
spec:
  containers:
    - name: migrations
      image: "{{ .Values.migrations.imageName }}"
    - name: migrations-data
      image: "{{ .Values.migrations.data.imageName }}"
      resources: {}
      env:
        - name: DATABASE_PASSWORD
@@ -0,0 +1,21 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: migrations-schema
  namespace: futureporn
  labels:
    app.kubernetes.io/name: migrations-schema
spec:
  containers:
    - name: migrations-schema
      image: "{{ .Values.migrations.schema.imageName }}"
      resources: {}
      env:
        - name: DATABASE_PASSWORD
          valueFrom:
            secretKeyRef:
              name: postgresql
              key: password
  restartPolicy: Never
@@ -19,7 +19,7 @@ spec:
    value: "{{ .Values.uppy.url }}"
  - name: NEXT_PUBLIC_POSTGREST_URL
    value: {{ printf "https://%s" .Values.postgrest.hostname | quote }}
  - name: NEXT_PUBLIC_WEBSITE_DOMAIN
  - name: NEXT_PUBLIC_URL
    value: {{ printf "https://%s" .Values.next.hostname | quote }}
  - name: NEXT_PUBLIC_API_DOMAIN
    value: {{ .Values.next.hostname | quote }}
@@ -42,13 +42,6 @@ spec:
      secretKeyRef:
        name: patreon
        key: clientSecret
  - name: SUPERTOKENS_API_KEYS
    valueFrom:
      secretKeyRef:
        name: supertokens
        key: apiKeys
  - name: SUPERTOKENS_URL
    value: {{ printf "https://%s" .Values.supertokens.hostname | quote }}
  - name: KEYCLOAK_CLIENT_ID
    value: futureporn
  - name: KEYCLOAK_CLIENT_SECRET
@@ -58,6 +51,10 @@ spec:
        key: clientSecret
  - name: KEYCLOAK_ISSUER
    value: {{ .Values.keycloak.issuer | quote }}
  - name: KEYCLOAK_URL
    value: {{ printf "https://%s" .Values.keycloak.hostname | quote }}
  - name: KEYCLOAK_LOCAL_URL
    value: {{ .Values.keycloak.localUrl | quote }}
ports:
  - name: web
    containerPort: 3000
@@ -98,6 +98,7 @@ supertokens:
  replicas: 1
keycloak:
  hostname: keycloak.fp.sbtp.xyz
  localUrl: http://keycloak.futureporn.svc.cluster.local:8080
  replicas: 1
  issuer: https://keycloak.fp.sbtp.xyz/realms/futureporn
logto:
@@ -111,4 +112,7 @@ whoami:
  hostname: whoami.fp.sbtp.xyz
  port: 8888
migrations:
  imageName: fp/migrations
  schema:
    imageName: fp/migrations-schema
  data:
    imageName: fp/migrations-data
@@ -6,15 +6,15 @@ RUN corepack enable && corepack prepare pnpm@9.6.0 --activate

FROM base AS build
COPY ./pnpm-workspace.yaml ./.npmrc .
COPY ./services/migrations/package.json ./services/migrations/pnpm-lock.yaml ./services/migrations/
COPY ./services/migrations-data/package.json ./services/migrations-data/pnpm-lock.yaml ./services/migrations-data/
RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm fetch
RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --frozen-lockfile --prefer-offline
COPY ./services/migrations/ ./services/migrations/
RUN pnpm --filter=@futureporn/migrations deploy --prod /prod/migrations
RUN ls -las /prod/migrations
COPY ./services/migrations-data/ ./services/migrations-data/
RUN pnpm --filter=@futureporn/migrations-data deploy --prod /prod/migrations-data
RUN ls -las /prod/migrations-data

FROM base AS migrations
FROM base AS migrations-data
ENV NODE_ENV=production
COPY --from=build /prod/migrations .
COPY --from=build /prod/migrations-data .
ENTRYPOINT ["pnpm", "start"]

@@ -0,0 +1,20 @@
FROM node:20-alpine AS base
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
WORKDIR /app
RUN corepack enable && corepack prepare pnpm@9.6.0 --activate

FROM base AS build
COPY ./pnpm-workspace.yaml ./.npmrc .
COPY ./services/migrations-schema/package.json ./services/migrations-schema/pnpm-lock.yaml ./services/migrations-schema/
RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm fetch
RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --frozen-lockfile --prefer-offline
COPY ./services/migrations-schema/ ./services/migrations-schema/
RUN pnpm --filter=@futureporn/migrations-schema deploy --prod /prod/migrations-schema
RUN ls -las /prod/migrations-schema

FROM base AS migrations-schema
ENV NODE_ENV=production
COPY --from=build /prod/migrations-schema .
ENTRYPOINT ["pnpm", "start"]

@@ -297,12 +297,25 @@ export interface IPlatformNotificationResponse {
}


export interface IVodsResponse {
  id: string
}

export interface IVod {
  id: string
  id: number;
  uuid: string;
  stream?: IStream;
  date: string;
  date2: string;
  mux_asset?: IMuxAsset;
  vtuber?: IVtuber;
  cuid?: string;
  tag_vod_relations?: any;
  video240Hash?: string;
  videoSrcHash?: string;
  timestamps?: any;
  announce_title?: string;
  announce_url?: string;
  videoSrcB2?: any;
  uploader: any;
  note: string;
}


@@ -312,7 +325,7 @@ export interface IStream {
  date_2: string;
  archive_status: ArchiveStatus;
  vods: IVod[];
  cuid: string;
  uuid: string;
  vtuber: IVtuber;
  is_chaturbate_stream: boolean;
  is_fansly_stream: boolean;
@@ -1 +0,0 @@
2024-10-25-from-strapi-to-postgrest-mk2.sql
@@ -1,350 +0,0 @@
SELECT dblink_connect(
  'old_db_conn',
  'dbname=futureporn_strapi_old user=postgres passfile=/tmp/.pgpass'
);


-- Temporary schema changes that I don't want to save in @futureporn/migrations
-- 1. ADD api.s3_files.id_old
-- 2. ADD api.vods.id_old
-- 3. ADD api.vods_s3_join.[id_old,vod_id_old,b_2_file_id_old]
-- 4. ADD api.vtubers.id_old
-- 5. ADD api.vods_s3_files_joins.id_old
ALTER TABLE IF EXISTS api.s3_files
  ADD COLUMN IF NOT EXISTS id_old int;

ALTER TABLE IF EXISTS api.vods
  ADD COLUMN IF NOT EXISTS id_old int;

ALTER TABLE api.vods_s3_files_joins
  ADD COLUMN IF NOT EXISTS id_old int;

ALTER TABLE api.vods_s3_files_joins
  ADD COLUMN IF NOT EXISTS vod_id_old int;

ALTER TABLE api.vods_s3_files_joins
  ADD COLUMN IF NOT EXISTS b_2_file_id_old int;

ALTER TABLE IF EXISTS api.vtubers
  ADD COLUMN IF NOT EXISTS id_old int;

ALTER TABLE api.vods_s3_files_joins
  ADD COLUMN IF NOT EXISTS id_old int;

ALTER TABLE api.vods_s3_files_joins
  ADD COLUMN IF NOT EXISTS s3_file_id UUID;

ALTER TABLE api.vods_s3_files_joins
  ADD COLUMN IF NOT EXISTS s3_file_id_old int;

CREATE TABLE IF NOT EXISTS api.a_temporary_vods (
  id integer,
  video_src_hash character varying,
  video_720_hash character varying,
  video_480_hash character varying,
  video_360_hash character varying,
  video_240_hash character varying,
  thin_hash character varying,
  thicc_hash character varying,
  announce_title character varying,
  announce_url character varying,
  note text,
  date timestamp(6) without time zone,
  spoilers text,
  created_at timestamp(6) without time zone,
  updated_at timestamp(6) without time zone,
  published_at timestamp(6) without time zone,
  created_by_id integer,
  updated_by_id integer,
  title character varying,
  chat_log text,
  date_2 character varying,
  cuid character varying,
  archive_status character varying
);

-- Enable the dblink extension
-- this lets us copy data between two different databases
-- in our case, we are copying tables from futureporn_strapi_old.public.streams to futureporn.api.streams
CREATE EXTENSION IF NOT EXISTS dblink;


SELECT dblink_connect(
  'old_db_conn',
  'dbname=futureporn_strapi_old user=postgres passfile=/tmp/.pgpass'
);


-- Migrate vtubers table
INSERT INTO api.vtubers (
  id,
  id_old,
  chaturbate,
  twitter,
  patreon,
  twitch,
  tiktok,
  onlyfans,
  youtube,
  linktree,
  carrd,
  fansly,
  pornhub,
  discord,
  reddit,
  throne,
  instagram,
  facebook,
  merch,
  slug,
  image,
  display_name,
  description1,
  description2,
  created_at,
  updated_at,
  theme_color,
  image_blur
)
SELECT DISTINCT
  gen_random_uuid() AS id,
  v.id AS id_old,
  v.chaturbate,
  v.twitter,
  v.patreon,
  v.twitch,
  v.tiktok,
  v.onlyfans,
  v.youtube,
  v.linktree,
  v.carrd,
  v.fansly,
  v.pornhub,
  v.discord,
  v.reddit,
  v.throne,
  v.instagram,
  v.facebook,
  v.merch,
  v.slug,
  v.image,
  v.display_name,
  v.description_1,
  v.description_2,
  v.created_at,
  v.updated_at,
  v.theme_color,
  v.image_blur
FROM dblink('old_db_conn', 'SELECT id,
  chaturbate,
  twitter,
  patreon,
  twitch,
  tiktok,
  onlyfans,
  youtube,
  linktree,
  carrd,
  fansly,
  pornhub,
  discord,
  reddit,
  throne,
  instagram,
  facebook,
  merch,
  slug,
  image,
  display_name,
  description_1,
  description_2,
  created_at,
  updated_at,
  published_at,
  created_by_id,
  updated_by_id,
  theme_color,
  image_blur
FROM public.vtubers')
AS v(
  id integer,
  chaturbate character varying(255),
  twitter character varying(255),
  patreon character varying(255),
  twitch character varying(255),
  tiktok character varying(255),
  onlyfans character varying(255),
  youtube character varying(255),
  linktree character varying(255),
  carrd character varying(255),
  fansly character varying(255),
  pornhub character varying(255),
  discord character varying(255),
  reddit character varying(255),
  throne character varying(255),
  instagram character varying(255),
  facebook character varying(255),
  merch character varying(255),
  slug character varying(255),
  image character varying(255),
  display_name character varying(255),
  description_1 text,
  description_2 text,
  created_at timestamp(6) without time zone,
  updated_at timestamp(6) without time zone,
  published_at timestamp(6) without time zone,
  created_by_id integer,
  updated_by_id integer,
  theme_color character varying(255),
  image_blur character varying(255)
);


-- Migrate streams table
-- here we are taking the pre-existing data from the strapi database
-- and copying it to the postgrest database.
-- some of the columns like vtuber need to be set to NULL because they are new and the strapi streams table didn't contain that info
INSERT INTO api.streams (platform_notification_type, date, vtuber, tweet, archive_status, is_chaturbate_stream, is_fansly_stream)
SELECT DISTINCT
  NULL AS platform_notification_type,
  s.date,
  NULL::UUID AS vtuber,
  NULL AS tweet,
  s.archive_status,
  s.is_chaturbate_stream,
  s.is_fansly_stream
FROM dblink('old_db_conn', 'SELECT date, archive_status, is_chaturbate_stream, is_fansly_stream FROM public.streams')
AS s(
  date timestamp,
  archive_status character varying,
  is_chaturbate_stream boolean,
  is_fansly_stream boolean
);


-- Migrate vods b2_files join table
-- previously public.vods_video_src_b_2_links
-- new api.vods_s3_join
INSERT INTO api.vods_s3_files_joins (id, id_old, vod_id, vod_id_old, s3_file_id, s3_file_id_old)
SELECT DISTINCT
  gen_random_uuid() AS id,
  old.id AS id_old,
  NULL::UUID AS vod_id,
  old.vod_id AS vod_id_old,
  NULL::UUID AS s3_file_id,
  old.b_2_file_id AS s3_file_id_old
FROM dblink('old_db_conn', 'SELECT id, vod_id, b_2_file_id FROM public.vods_video_src_b_2_links')
AS old(
  id int,
  vod_id int,
  b_2_file_id int
);


-- Migrate B2 table
INSERT INTO api.s3_files (
  id,
  id_old,
  s3_id,
  s3_key,
  created_at,
  updated_at,
  bucket,
  cdn_url
)
SELECT
  gen_random_uuid()::UUID AS id,
  b2_file.id::INT AS id_old,
  b2_file.upload_id::TEXT AS s3_id,
  b2_file.key::TEXT AS s3_key,
  b2_file.created_at::TIMESTAMP(6) WITHOUT TIME ZONE AS created_at,
  b2_file.updated_at::TIMESTAMP(6) WITHOUT TIME ZONE AS updated_at,
  'futureporn-b2'::TEXT AS bucket,
  b2_file.cdn_url::TEXT AS cdn_url
FROM
  dblink('old_db_conn', 'SELECT id, key, upload_id, created_at, updated_at, cdn_url FROM public.b2_files') AS b2_file (
    id integer,
    key character varying(255),
    upload_id character varying(255),
    created_at timestamp(6) without time zone,
    updated_at timestamp(6) without time zone,
    cdn_url character varying(255)
  );


-- Migrate vods table
INSERT INTO api.vods (
  id,
  id_old,
  stream_id,
  created_at,
  updated_at,
  title,
  date,
  note,
  ipfs_cid,
  s3_file,
  announce_title,
  announce_url,
  status
)
SELECT
  gen_random_uuid(),
  vods.id,
  NULL,
  vods.created_at,
  vods.updated_at,
  vods.title,
  vods.date::date,
  vods.note,
  vods.video_src_hash,
  NULL, -- old vods doesn't contain this info -- the join table is needed
  vods.announce_title,
  vods.announce_url,
  'pending_recording'
FROM
  dblink('old_db_conn', 'SELECT * FROM public.vods') AS vods (
    id integer,
    video_src_hash character varying,
    video_720_hash character varying,
    video_480_hash character varying,
    video_360_hash character varying,
    video_240_hash character varying,
    thin_hash character varying,
    thicc_hash character varying,
    announce_title character varying,
    announce_url character varying,
    note text,
    date timestamp(6) without time zone,
    spoilers text,
    created_at timestamp(6) without time zone,
    updated_at timestamp(6) without time zone,
    published_at timestamp(6) without time zone,
    created_by_id integer,
    updated_by_id integer,
    title character varying,
    chat_log text,
    date_2 character varying,
    cuid character varying,
    archive_status character varying
  )
LEFT JOIN (
  -- Fetching vods_vtuber_links from the old database
  SELECT *
  FROM dblink('old_db_conn', 'SELECT vod_id, vtuber_id FROM public.vods_vtuber_links') AS links (
    vod_id integer,
    vtuber_id integer
  )
) AS links ON vods.id = links.vod_id
LEFT JOIN api.vtubers AS vtubers
  ON links.vtuber_id = vtubers.id_old; -- Map the old `vtuber_id` to the new `uuid` in `vtubers`


-- Now we copy patron data from the old Strapi table up_user
-- Going forward we are changing how Patrons table is populated.

-- FROM up_user
@@ -1,28 +0,0 @@
#!/bin/bash

postgres_pod_name=postgresql-primary-0

if [ -z $POSTGRES_PASSWORD ]; then
  echo "POSTGRES_PASSWORD was missing in env. Please run using dotenvx or similar"
  exit 5
fi


if [ -z "$1" ]
then
  echo "Usage: a.migration.sh /path/to/migration.sql"
  exit 6
fi

echo "create .pgpass file inside pod"
kubectl -n futureporn exec -i ${postgres_pod_name} -- bash -c "echo *:5432:*:postgres:${POSTGRES_PASSWORD} | tee /tmp/.pgpass"
kubectl -n futureporn exec -i ${postgres_pod_name} -- chmod 0600 /tmp/.pgpass

echo "Copying sql to pod"
kubectl -n futureporn cp ${1} ${postgres_pod_name}:/tmp/migration.sql

echo "Running ${1} inside the pod"
kubectl -n futureporn exec -i ${postgres_pod_name} -- env PGPASSWORD=${POSTGRES_PASSWORD} psql -U postgres -d futureporn -f /tmp/migration.sql

echo "rm .pgpass file"
kubectl -n futureporn exec -i ${postgres_pod_name} -- rm -rf /tmp/.pgpass
@@ -1,14 +0,0 @@

SELECT
  gen_random_uuid() AS id,
  vods.id AS id_old,
  links.vod_id AS vod_id_old,
  links.vtuber_id AS vtuber,
  stream_links.stream_id AS stream

FROM public_strapi_old.vods AS vods
LEFT JOIN public_strapi_old.vods_vtuber_links AS links
  ON vods.id = links.vod_id

LEFT JOIN public_strapi_old.vods_stream_links AS stream_links
  ON vods.id = stream_links.vod_id
@@ -8,7 +8,7 @@
    "test": "mocha",
    "dev": "pnpm run dev.nodemon # yes this is crazy to have nodemon execute tsx, but it's the only way I have found to get live reloading in TS/ESM/docker with Graphile Worker's way of loading tasks",
    "dev.tsx": "tsx ./src/index.ts",
    "dev.nodemon": "nodemon --ext ts --exec \"pnpm run dev.tsx\"",
    "dev.nodemon": "nodemon --exitcrash --ext ts --exec \"pnpm run dev.tsx\"",
    "dev.node": "node --no-warnings=ExperimentalWarning --loader ts-node/esm src/index.ts"
  },
  "keywords": [
@@ -0,0 +1,27 @@
# @futureporn/migrations-data

Here we handle data migrations for the postgrest database.

@see https://github.com/zakpatterson/postgres-schema-migrations

Reminder: only write migrations that affect data. (don't write migrations that affect schema)

## K.I.S.S.

Keep It Stupidly Simple.

We are keeping this module as simple as possible. This means pure JS (no TypeScript!)


## troubleshooting

If you see the following error, graphile_worker likely hasn't had a chance to create its functions. Make sure that a graphile_worker instance is running, so it can automatically create the necessary functions.

```json
{
  "code": "42883",
  "details": null,
  "hint": "No function matches the given name and argument types. You might need to add explicit type casts.",
  "message": "function graphile_worker.add_job(text, json, max_attempts => integer) does not exist"
}
```
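To confirm from psql whether the worker has created those functions yet, a query like the following works. This is a sketch; it assumes graphile_worker's default schema name:

```sql
-- List the functions graphile_worker has created in its schema.
-- Zero rows means the worker hasn't initialized yet.
SELECT p.proname
FROM pg_proc p
JOIN pg_namespace n ON n.oid = p.pronamespace
WHERE n.nspname = 'graphile_worker';
```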
@@ -0,0 +1,68 @@
import pg from 'pg'
import { migrate } from 'postgres-schema-migrations';
import path, { dirname } from 'node:path';
import { fileURLToPath } from 'url';
import 'dotenv/config';

const { Client } = pg
const __dirname = dirname(fileURLToPath(import.meta.url));

if (!process.env.DATABASE_PASSWORD) throw new Error('DATABASE_PASSWORD is missing in env');

/*
 * Here we set up a Foreign Data Wrapper which connects us to the old strapi database.
 * From this Strapi db futureporn-old, we migrate data to the futureporn database.
 */

async function setupForeignDataWrapper(client) {

  // Run the SQL commands
  const sql = `
    BEGIN;

    CREATE EXTENSION IF NOT EXISTS postgres_fdw;

    CREATE SERVER IF NOT EXISTS futureporn_old
      FOREIGN DATA WRAPPER postgres_fdw
      OPTIONS (dbname 'futureporn_old');

    CREATE USER MAPPING IF NOT EXISTS FOR postgres
      SERVER futureporn_old
      OPTIONS (password_required 'true', password '${process.env.DATABASE_PASSWORD}');

    COMMIT;
  `;

  await client.query(sql);
  console.log('Foreign Data Wrapper setup completed successfully.');

}

async function main() {
  const dbConfig = {
    database: "futureporn",
    user: "postgres",
    password: process.env.DATABASE_PASSWORD,
    host: 'postgresql-primary.futureporn.svc.cluster.local',
    port: 5432,
  }
  const client = new Client(dbConfig)
  await client.connect()
  const migrateConfig = {
    client,
    ensureDatabaseExists: false,
    defaultDatabase: 'postgres'
  }

  try {
    await setupForeignDataWrapper(client)
    await migrate(migrateConfig, path.join(__dirname, "./migrations/"), { logger: console.log, schema: 'migrations_data' })
  } finally {
    await client.end()
  }
}


await main()
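With the server and user mapping from setupForeignDataWrapper in place, an individual data migration in migrations/ can pull old tables across the FDW. A minimal sketch, assuming the old database still exposes public.vtubers (the local schema name strapi_old is hypothetical):

```sql
-- Create a local foreign-table definition for one old table,
-- then query it like any other table.
CREATE SCHEMA IF NOT EXISTS strapi_old;
IMPORT FOREIGN SCHEMA public LIMIT TO (vtubers)
  FROM SERVER futureporn_old INTO strapi_old;
SELECT count(*) FROM strapi_old.vtubers;
```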
@@ -1,17 +1,4 @@

BEGIN;

CREATE EXTENSION IF NOT EXISTS postgres_fdw;

CREATE SERVER IF NOT EXISTS futureporn_old
  FOREIGN DATA WRAPPER postgres_fdw
  OPTIONS (dbname 'futureporn_old');

CREATE USER MAPPING IF NOT EXISTS FOR postgres
  SERVER futureporn_old
  OPTIONS (password_required 'true', password ''); -- @todo add password here


-- 2024-10-25

/**
 *
@@ -58,6 +45,8 @@ CREATE USER MAPPING IF NOT EXISTS FOR postgres
 *
 */

SET search_path TO 'public';

CREATE FOREIGN TABLE external_b2_files
(
  id INT,
@@ -274,6 +263,7 @@ CREATE FOREIGN TABLE external_vods
  published_at date,
  title text,
  date date NOT NULL,
  date_2 timestamp without time zone,
  mux_asset INT,
  thumbnail INT,
  vtuber INT,
@@ -584,6 +574,7 @@ INSERT INTO api.streams (
  id,
  platform_notification_type,
  date,
  date_2,
  created_at,
  updated_at,
  vtuber_num,
@@ -595,7 +586,8 @@ OVERRIDING SYSTEM VALUE
SELECT
  streams.id,
  NULL AS platform_notification_type, -- Modify if necessary
  streams.date_2::TIMESTAMP WITHOUT TIME ZONE AS date,
  streams.date_2::TIMESTAMP WITH TIME ZONE AS date,
  streams.date_2::TIMESTAMP WITH TIME ZONE AS date_2,
  streams.created_at,
  streams.updated_at,
  links.vtuber_id AS vtuber_num,
@@ -630,7 +622,8 @@ INSERT INTO api.vods (
  updated_at,
  published_at,
  title,
  date,
  date,
  date_2,
  note,
  ipfs_cid,
  announce_title,
@@ -644,7 +637,8 @@ SELECT
  vods.updated_at,
  vods.published_at,
  vods.title,
  vods.date::date,
  vods.date::TIMESTAMP WITH TIME ZONE,
  vods.date_2::TIMESTAMP WITH TIME ZONE AS date_2,
  vods.note,
  vods.video_src_hash AS ipfs_cid,
  vods.announce_title,
@@ -984,5 +978,3 @@ FROM public.external_vtubers_toys_links;


COMMIT;
@@ -1,5 +1,4 @@

BEGIN;
-- 2024-11-21

-- SELECT * FROM api.vods AS vods
-- INNER JOIN api.vods_vtuber_links AS links
@@ -16,4 +15,3 @@ FROM api.vods_vtuber_links links
WHERE api.vods.id = links.vod_id;

COMMIT;
@@ -1,11 +1,9 @@

BEGIN;
-- 2024-11-22

UPDATE api.vods
SET mux_asset_id = links.mux_asset_id
FROM api.vods_mux_asset_links links
WHERE api.vods.id = links.vod_id;

COMMIT;

@@ -1,11 +1,9 @@

BEGIN;
-- 2024-11-22

UPDATE api.vods
SET thumbnail_id = links.b_2_file_id
FROM api.vods_thumbnail_links links
WHERE api.vods.id = links.vod_id;

COMMIT;

@@ -1,11 +1,9 @@

BEGIN;
-- 2024-11-22

UPDATE api.streams
SET vtuber_id = vtuber_num
WHERE vtuber_num IS NOT NULL;

COMMIT;

-- @TODO api.streams.vtuber_num is deprecated in favor of api.streams.vtuber_id
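Once nothing reads the deprecated column anymore, a follow-up migration could drop it. A hypothetical sketch, not part of this commit:

```sql
-- Hypothetical cleanup once all readers have moved to vtuber_id:
ALTER TABLE api.streams DROP COLUMN IF EXISTS vtuber_num;
```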
@@ -0,0 +1,9 @@
-- 2024-12-16

-- Relate VODs to streams by matching the same date

-- Update existing VODs to associate them with the corresponding stream
UPDATE api.vods
SET stream_id = streams.id
FROM api.streams
WHERE vods.date = streams.date;
@@ -1,7 +1,7 @@
# Futureporn data migrations

This directory is for data migrations ONLY.
For schema migrations, see ../services/migrations node package
For schema migrations, see ./migrations node package


## Usage
@@ -0,0 +1,20 @@
{
  "name": "@futureporn/migrations-data",
  "type": "module",
  "version": "0.6.0",
  "description": "",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 0",
    "start": "node index.js"
  },
  "packageManager": "pnpm@9.6.0",
  "keywords": [],
  "author": "@CJ_Clippy",
  "license": "Unlicense",
  "dependencies": {
    "dotenv": "^16.4.5",
    "pg": "8.12.0",
    "postgres-schema-migrations": "^6.1.0"
  }
}
@@ -0,0 +1,186 @@
lockfileVersion: '9.0'

settings:
  autoInstallPeers: true
  excludeLinksFromLockfile: false

importers:

  .:
    dependencies:
      dotenv:
        specifier: ^16.4.5
        version: 16.4.5
      pg:
        specifier: 8.12.0
        version: 8.12.0
      postgres-schema-migrations:
        specifier: ^6.1.0
        version: 6.1.0

  ../..: {}

  ../../packages/fetchers: {}

  ../../packages/infra: {}

  ../../packages/storage: {}

  ../../packages/types: {}

  ../../packages/utils: {}

  ../bot: {}

  ../capture: {}

  ../factory: {}

  ../htmx: {}

  ../mailbox: {}

  ../migrations-schema: {}

  ../next: {}

  ../scout: {}

  ../strapi: {}

  ../uppy: {}

packages:

  dotenv@16.4.5:
    resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==}
    engines: {node: '>=12'}

  pg-cloudflare@1.1.1:
    resolution: {integrity: sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==}

  pg-connection-string@2.6.4:
    resolution: {integrity: sha512-v+Z7W/0EO707aNMaAEfiGnGL9sxxumwLl2fJvCQtMn9Fxsg+lPpPkdcyBSv/KFgpGdYkMfn+EI1Or2EHjpgLCA==}

  pg-int8@1.0.1:
    resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==}
    engines: {node: '>=4.0.0'}

  pg-pool@3.6.2:
    resolution: {integrity: sha512-Htjbg8BlwXqSBQ9V8Vjtc+vzf/6fVUuak/3/XXKA9oxZprwW3IMDQTGHP+KDmVL7rtd+R1QjbnCFPuTHm3G4hg==}
    peerDependencies:
      pg: '>=8.0'

  pg-protocol@1.6.1:
    resolution: {integrity: sha512-jPIlvgoD63hrEuihvIg+tJhoGjUsLPn6poJY9N5CnlPd91c2T18T/9zBtLxZSb1EhYxBRoZJtzScCaWlYLtktg==}

  pg-types@2.2.0:
    resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==}
    engines: {node: '>=4'}

  pg@8.12.0:
    resolution: {integrity: sha512-A+LHUSnwnxrnL/tZ+OLfqR1SxLN3c/pgDztZ47Rpbsd4jUytsTtwQo/TLPRzPJMp/1pbhYVhH9cuSZLAajNfjQ==}
    engines: {node: '>= 8.0.0'}
    peerDependencies:
      pg-native: '>=3.0.1'
    peerDependenciesMeta:
      pg-native:
        optional: true

  pgpass@1.0.5:
    resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==}

  postgres-array@2.0.0:
    resolution: {integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==}
    engines: {node: '>=4'}

  postgres-bytea@1.0.0:
    resolution: {integrity: sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==}
    engines: {node: '>=0.10.0'}

  postgres-date@1.0.7:
    resolution: {integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==}
    engines: {node: '>=0.10.0'}

  postgres-interval@1.2.0:
    resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==}
    engines: {node: '>=0.10.0'}

  postgres-schema-migrations@6.1.0:
    resolution: {integrity: sha512-d1LJ+A9Lg4kAwuh91S8ozF8q3adFNJlStbpUF/sbjMTzSIzJClpmg4D6qyd9nvKt2el0rnZJjXZQ2r01Y5OpzA==}
    engines: {node: '>10.17.0'}
    hasBin: true

  split2@4.2.0:
    resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
    engines: {node: '>= 10.x'}

  sql-template-strings@2.2.2:
    resolution: {integrity: sha512-UXhXR2869FQaD+GMly8jAMCRZ94nU5KcrFetZfWEMd+LVVG6y0ExgHAhatEcKZ/wk8YcKPdi+hiD2wm75lq3/Q==}
    engines: {node: '>=4.0.0'}

  xtend@4.0.2:
    resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==}
    engines: {node: '>=0.4'}

snapshots:

  dotenv@16.4.5: {}

  pg-cloudflare@1.1.1:
    optional: true

  pg-connection-string@2.6.4: {}

  pg-int8@1.0.1: {}

  pg-pool@3.6.2(pg@8.12.0):
    dependencies:
      pg: 8.12.0

  pg-protocol@1.6.1: {}

  pg-types@2.2.0:
    dependencies:
      pg-int8: 1.0.1
      postgres-array: 2.0.0
      postgres-bytea: 1.0.0
      postgres-date: 1.0.7
      postgres-interval: 1.2.0

  pg@8.12.0:
    dependencies:
      pg-connection-string: 2.6.4
      pg-pool: 3.6.2(pg@8.12.0)
      pg-protocol: 1.6.1
      pg-types: 2.2.0
      pgpass: 1.0.5
    optionalDependencies:
      pg-cloudflare: 1.1.1

  pgpass@1.0.5:
    dependencies:
      split2: 4.2.0

  postgres-array@2.0.0: {}

  postgres-bytea@1.0.0: {}

  postgres-date@1.0.7: {}

  postgres-interval@1.2.0:
    dependencies:
      xtend: 4.0.2

  postgres-schema-migrations@6.1.0:
    dependencies:
      pg: 8.12.0
      sql-template-strings: 2.2.2
    transitivePeerDependencies:
      - pg-native

  split2@4.2.0: {}

  sql-template-strings@2.2.2: {}

  xtend@4.0.2: {}
@@ -2,7 +2,7 @@

Here we handle migrations for the postgrest database.

@see https://github.com/thomwright/postgres-migrations
@see https://github.com/zakpatterson/postgres-schema-migrations

Reminder: only write migrations that affect schema. (don't write migrations that affect data)
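For contrast with the data migrations, a schema migration holds DDL only. A minimal sketch of one migration file (the file name `0001_create-example.sql` and the table are hypothetical; postgres-schema-migrations applies the files in this package's migrations/ directory in order):

```sql
-- 0001_create-example.sql (hypothetical): DDL only, no data changes.
CREATE TABLE IF NOT EXISTS api.example (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  created_at timestamptz NOT NULL DEFAULT now()
);
```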
@@ -1,4 +1,4 @@
import {migrate} from 'postgres-migrations'
import {migrate} from 'postgres-schema-migrations'
import path, { dirname } from 'node:path'
import { fileURLToPath } from 'url';
import 'dotenv/config'
@@ -25,7 +25,7 @@ async function main() {
    defaultDatabase: "postgres"
  }

  await migrate(dbConfig, path.join(__dirname, "./migrations/"), { logger: console.log })
  await migrate(dbConfig, path.join(__dirname, "./migrations/"), { schema: 'migrations_schema', logger: console.log })
}

Some files were not shown because too many files have changed in this diff.