Skip to content

Commit

Permalink
rebase
Browse files Browse the repository at this point in the history
  • Loading branch information
fenos committed Jan 30, 2024
2 parents 6ba2add + 0fb6962 commit 5e47df6
Show file tree
Hide file tree
Showing 23 changed files with 444 additions and 431 deletions.
3 changes: 2 additions & 1 deletion .dockerignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
node_modules
dist
.env
.env
DB_MIGRATION_HASH_FILE
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,4 @@ data/
bin/
coverage/
.idea/
migrations/tenants-migration-hash.txt
DB_MIGRATION_HASH_FILE
42 changes: 29 additions & 13 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,24 +1,40 @@
FROM node:18-alpine
# Base stage for shared environment setup
FROM node:18-alpine as base
RUN apk add --no-cache g++ make python3
WORKDIR /app
COPY package.json package-lock.json ./
RUN npm ci --production

FROM node:18-alpine
RUN apk add --no-cache g++ make python3
WORKDIR /app
COPY . .
# Dependencies stage - install and cache all dependencies
FROM base as dependencies
RUN npm ci
# Cache the installed node_modules for later stages
RUN cp -R node_modules /node_modules_cache

# Build stage - use cached node_modules for building the application
FROM base as build
COPY --from=dependencies /node_modules_cache ./node_modules
COPY . .
RUN npm run build

FROM node:18-alpine
# Production dependencies stage - use npm cache to install only production dependencies
FROM base as production-deps
COPY --from=dependencies /node_modules_cache ./node_modules
RUN npm ci --production

# Final stage - for the production build
FROM base as final
ARG VERSION
ENV VERSION=$VERSION
WORKDIR /app
COPY migrations migrations
COPY ecosystem.config.js package.json ./
COPY --from=0 /app/node_modules node_modules
COPY --from=1 /app/dist dist

# Copy production node_modules from the production dependencies stage
COPY --from=production-deps /app/node_modules node_modules
# Copy build artifacts from the build stage
COPY --from=build /app/dist dist
COPY ./docker-entrypoint.sh .

RUN node dist/scripts/migration-hash.js

EXPOSE 5000
ENTRYPOINT ["docker-entrypoint.sh"]
CMD ["node", "dist/server.js"]
ENTRYPOINT ["./docker-entrypoint.sh"]
CMD ["node", "dist/server.js"]
35 changes: 5 additions & 30 deletions docker-entrypoint.sh
Original file line number Diff line number Diff line change
@@ -1,36 +1,11 @@
#!/usr/bin/env bash
#!/usr/bin/env sh
set -Eeuo pipefail

# usage: file_env VAR [DEFAULT]
# ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
file_env() {
local var="$1"
local fileVar="${var}_FILE"
local def="${2:-}"
if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
exit 1
fi
local val="$def"
if [ "${!var:-}" ]; then
val="${!var}"
elif [ "${!fileVar:-}" ]; then
val="$(< "${!fileVar}")"
fi
export "$var"="$val"
unset "$fileVar"
}

# load secrets either from environment variables or files
file_env 'ANON_KEY'
file_env 'SERVICE_KEY'
file_env 'PGRST_JWT_SECRET'
file_env 'DATABASE_URL'
file_env 'MULTITENANT_DATABASE_URL'
file_env 'LOGFLARE_API_KEY'
file_env 'LOGFLARE_SOURCE_TOKEN'
# Check if the DB_MIGRATION_HASH_FILE exists and is not empty
if [ -s DB_MIGRATION_HASH_FILE ]; then
export DB_MIGRATION_HASH=$(cat DB_MIGRATION_HASH_FILE)
fi

exec "${@}"

11 changes: 0 additions & 11 deletions ecosystem.config.js

This file was deleted.

3 changes: 2 additions & 1 deletion src/admin-app.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@ const build = (opts: FastifyServerOptions = {}, appInstance?: FastifyInstance):
app.register(plugins.adminTenantId)
app.register(plugins.logTenantId)
app.register(plugins.logRequest({ excludeUrls: ['/status', '/metrics', '/health'] }))
app.register(routes.tenant, { prefix: 'tenants' })
app.register(routes.tenants, { prefix: 'tenants' })
app.register(routes.migrations, { prefix: 'migrations' })

let registriesToMerge: Registry[] = []

Expand Down
2 changes: 0 additions & 2 deletions src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ type StorageConfigType = {
dbSuperUser: string
dbSearchPath: string
dbMigrationHash?: string
dbDisableTenantMigrations: boolean
databaseURL: string
databaseSSLRootCert?: string
databasePoolURL?: string
Expand Down Expand Up @@ -236,7 +235,6 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType {
),
dbSuperUser: getOptionalConfigFromEnv('DB_SUPER_USER') || 'postgres',
dbMigrationHash: getOptionalConfigFromEnv('DB_MIGRATION_HASH'),
dbDisableTenantMigrations: getOptionalConfigFromEnv('DB_DISABLE_TENANT_MIGRATIONS') === 'true',

// Database - Connection
dbSearchPath: getOptionalConfigFromEnv('DATABASE_SEARCH_PATH', 'DB_SEARCH_PATH') || '',
Expand Down
66 changes: 61 additions & 5 deletions src/database/migrate.ts
Original file line number Diff line number Diff line change
@@ -1,17 +1,20 @@
import { Client, ClientConfig } from 'pg'
import { loadMigrationFiles, MigrationError } from 'postgres-migrations'
import { getConfig } from '../config'
import { logger } from '../monitoring'
import { logger, logSchema } from '../monitoring'
import { BasicPgClient, Migration } from 'postgres-migrations/dist/types'
import { validateMigrationHashes } from 'postgres-migrations/dist/validation'
import { runMigration } from 'postgres-migrations/dist/run-migration'
import SQL from 'sql-template-strings'
import { searchPath } from './connection'
import { updateTenantMigrationVersion } from './tenant'
import { listTenantsToMigrate, updateTenantMigrationVersion } from './tenant'
import { knex } from './multitenant-db'
import { RunMigrationsOnTenants } from '../queue'

const {
isMultitenant,
multitenantDatabaseUrl,
pgQueueEnable,
databaseSSLRootCert,
dbAnonRole,
dbAuthenticatedRole,
Expand All @@ -34,6 +37,43 @@ const backportMigrations = [
},
]

/**
 * Runs migrations for all tenants by enqueueing one
 * RunMigrationsOnTenants job per tenant.
 *
 * A Postgres advisory lock guarantees that only one instance of the
 * service performs the dispatch at a time; every other instance
 * returns immediately without doing anything.
 */
export async function runMigrationsOnAllTenants() {
  // Tenant migrations are dispatched as queue jobs (RunMigrationsOnTenants.send),
  // so a disabled queue means there is no worker to ever process them — bail out.
  // NOTE(fix): the guard was inverted (`if (pgQueueEnable) return`), which skipped
  // dispatch precisely when the queue WAS available and enqueued when it was not.
  if (!pgQueueEnable) {
    return
  }

  // Session-level advisory lock; the literal key is an arbitrary 64-bit
  // identifier shared by all instances of this service.
  const result = await knex.raw(`SELECT pg_try_advisory_lock(?);`, ['-8575985245963000605'])
  const lockAcquired = result.rows.shift()?.pg_try_advisory_lock || false

  if (!lockAcquired) {
    // Another instance already holds the lock and is doing the dispatch.
    return
  }

  try {
    // listTenantsToMigrate is an async generator yielding batches of tenant ids.
    const tenants = listTenantsToMigrate()
    for await (const tenantBatch of tenants) {
      // allSettled: a failure to enqueue one tenant must not block the others.
      await Promise.allSettled(
        tenantBatch.map((tenant) => {
          return RunMigrationsOnTenants.send({
            tenantId: tenant,
            // presumably de-duplicates concurrent jobs for the same tenant — verify against queue semantics
            singletonKey: tenant,
            tenant: {
              ref: tenant,
            },
          })
        })
      )
    }
  } finally {
    try {
      await knex.raw(`SELECT pg_advisory_unlock(?);`, ['-8575985245963000605'])
    } catch (e) {
      // best-effort unlock; the lock is released when the session ends anyway
    }
  }
}

/**
* Runs multi-tenant migrations
*/
Expand All @@ -46,15 +86,20 @@ export async function runMultitenantMigrations(): Promise<void> {
/**
* Runs migrations on a specific tenant by providing its database DSN
* @param databaseUrl
* @param tenantId
*/
export async function runMigrationsOnTenant(databaseUrl: string): Promise<void> {
export async function runMigrationsOnTenant(databaseUrl: string, tenantId?: string): Promise<void> {
let ssl: ClientConfig['ssl'] | undefined = undefined

if (databaseSSLRootCert) {
ssl = { ca: databaseSSLRootCert }
}

await connectAndMigrate(databaseUrl, './migrations/tenant', ssl)
await connectAndMigrate(databaseUrl, './migrations/tenant', ssl, undefined, tenantId)

if (isMultitenant && tenantId) {
await updateTenantMigrationVersion([tenantId])
}
}

/**
Expand All @@ -63,12 +108,14 @@ export async function runMigrationsOnTenant(databaseUrl: string): Promise<void>
* @param migrationsDirectory
* @param ssl
* @param shouldCreateStorageSchema
* @param tenantId
*/
async function connectAndMigrate(
databaseUrl: string | undefined,
migrationsDirectory: string,
ssl?: ClientConfig['ssl'],
shouldCreateStorageSchema?: boolean
shouldCreateStorageSchema?: boolean,
tenantId?: string
) {
const dbConfig: ClientConfig = {
connectionString: databaseUrl,
Expand All @@ -78,6 +125,13 @@ async function connectAndMigrate(
}

const client = new Client(dbConfig)
client.on('error', (err) => {
logSchema.error(logger, 'Error on database connection', {
type: 'error',
error: err,
project: tenantId,
})
})
try {
await client.connect()
await migrate({ client }, migrationsDirectory, shouldCreateStorageSchema)
Expand Down Expand Up @@ -114,6 +168,8 @@ function runMigrations(migrationsDirectory: string, shouldCreateStorageSchema =
try {
const migrationTableName = 'migrations'

await client.query(`SET search_path TO ${searchPath.join(',')}`)

let appliedMigrations: Migration[] = []
if (await doesTableExist(client, migrationTableName)) {
const { rows } = await client.query(`SELECT * FROM ${migrationTableName} ORDER BY id`)
Expand Down
46 changes: 1 addition & 45 deletions src/database/tenant.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ import { knex } from './multitenant-db'
import { StorageBackendError } from '../storage'
import { JwtPayload } from 'jsonwebtoken'
import { PubSubAdapter } from '../pubsub'
import { RunMigrationsEvent } from '../queue/events/run-migrations'

interface TenantConfig {
anonKey?: string
Expand All @@ -26,14 +25,7 @@ export interface Features {
}
}

const {
isMultitenant,
dbServiceRole,
serviceKey,
jwtSecret,
dbMigrationHash,
dbDisableTenantMigrations,
} = getConfig()
const { isMultitenant, dbServiceRole, serviceKey, jwtSecret, dbMigrationHash } = getConfig()

const tenantConfigCache = new Map<string, TenantConfig>()

Expand Down Expand Up @@ -80,42 +72,6 @@ export async function* listTenantsToMigrate() {
}
}

/**
 * Runs migrations for all tenants by enqueueing a RunMigrationsEvent
 * job per tenant.
 *
 * A Postgres advisory lock ensures only one service instance performs
 * the dispatch at a time; every other instance returns immediately.
 */
export async function runMigrations() {
// Operator kill-switch: skip all tenant migrations when disabled via config.
if (dbDisableTenantMigrations) {
return
}
// Session-level advisory lock; the literal key is an arbitrary 64-bit
// identifier shared by all instances of this service.
const result = await knex.raw(`SELECT pg_try_advisory_lock(?);`, ['-8575985245963000605'])
const lockAcquired = result.rows.shift()?.pg_try_advisory_lock || false

if (!lockAcquired) {
// Another instance already holds the lock and is doing the dispatch.
return
}

try {
// listTenantsToMigrate yields tenant ids in batches (iterated with for-await,
// so presumably an async generator — verify against its definition).
const tenants = listTenantsToMigrate()
for await (const tenantBatch of tenants) {
// allSettled: a failure to enqueue one tenant must not block the others.
await Promise.allSettled(
tenantBatch.map((tenant) => {
return RunMigrationsEvent.send({
tenantId: tenant,
// presumably de-duplicates concurrent jobs for the same tenant — verify against queue semantics
singletonKey: tenant,
tenant: {
ref: tenant,
},
})
})
)
}
} finally {
// Best-effort unlock: the advisory lock is released when the DB session
// closes anyway, so a failure here is deliberately swallowed.
try {
await knex.raw(`SELECT pg_advisory_unlock(?);`, ['-8575985245963000605'])
} catch (e) {}
}
}

export function updateTenantMigrationVersion(tenantIds: string[]) {
return knex
.table('tenants')
Expand Down
2 changes: 1 addition & 1 deletion src/http/routes/index.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
export { default as bucket } from './bucket'
export { default as object } from './object'
export { default as render } from './render'
export { default as tenant } from './tenant'
export { default as multiPart } from './tus'
export { default as healthcheck } from './health'
export * from './tenant'
Loading

0 comments on commit 5e47df6

Please sign in to comment.