feat: backend service

Author: Ilia Denisov
Date: 2026-05-06 10:14:55 +03:00
Committed by: GitHub
parent 3e2622757e
commit f446c6a2ac
1486 changed files with 49720 additions and 266401 deletions
@@ -0,0 +1,544 @@
// Command backend boots the Galaxy backend process. It loads configuration,
// initialises telemetry and the structured logger, opens the Postgres pool,
// applies embedded migrations, and runs the HTTP, gRPC push, and (optional)
// Prometheus metrics listeners until SIGINT or SIGTERM triggers an orderly
// shutdown.
package main

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"galaxy/backend/internal/admin"
	"galaxy/backend/internal/app"
	"galaxy/backend/internal/auth"
	"galaxy/backend/internal/config"
	"galaxy/backend/internal/dockerclient"
	"galaxy/backend/internal/engineclient"
	"galaxy/backend/internal/geo"
	"galaxy/backend/internal/lobby"
	"galaxy/backend/internal/logging"
	"galaxy/backend/internal/mail"
	"galaxy/backend/internal/metricsapi"
	"galaxy/backend/internal/notification"
	backendpostgres "galaxy/backend/internal/postgres"
	"galaxy/backend/internal/runtime"
	backendserver "galaxy/backend/internal/server"
	"galaxy/backend/internal/telemetry"
	"galaxy/backend/internal/user"
	"galaxy/backend/push"

	"github.com/google/uuid"
	mobyclient "github.com/moby/moby/client"
	"go.uber.org/zap"
)
func main() {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer cancel()
if err := run(ctx); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
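
// run wires the backend's full dependency graph and runs until ctx is
// cancelled or a component fails. The named error return lets the deferred
// cleanups below fold their own shutdown errors into the final result via
// errors.Join.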
func run(ctx context.Context) (err error) {
cfg, err := config.LoadFromEnv()
if err != nil {
return fmt.Errorf("load backend config: %w", err)
}
logger, err := logging.New(cfg.Logging)
if err != nil {
return fmt.Errorf("build backend logger: %w", err)
}
defer func() {
err = errors.Join(err, logging.Sync(logger))
}()
telemetryRT, err := telemetry.New(ctx, logger, cfg.Telemetry)
if err != nil {
return fmt.Errorf("build backend telemetry: %w", err)
}
defer func() {
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
defer shutdownCancel()
err = errors.Join(err, telemetryRT.Shutdown(shutdownCtx))
}()
db, err := backendpostgres.Open(ctx, cfg.Postgres, telemetryRT)
if err != nil {
return fmt.Errorf("open backend postgres pool: %w", err)
}
defer func() {
err = errors.Join(err, db.Close())
}()
if err := backendpostgres.ApplyMigrations(ctx, db); err != nil {
return fmt.Errorf("apply backend migrations: %w", err)
}
pushSvc, err := push.NewService(push.ServiceConfig{FreshnessWindow: cfg.FreshnessWindow}, logger, telemetryRT)
if err != nil {
return fmt.Errorf("build backend push service: %w", err)
}
geoSvc, err := geo.NewService(cfg.GeoIP.DBPath, db)
if err != nil {
return fmt.Errorf("build backend geo service: %w", err)
}
geoSvc.SetLogger(logger)
defer func() {
// Drain pending counter goroutines first so their upserts run
// against a still-open Postgres pool, then release the
// GeoLite2 resolver. Drain is bounded by cfg.ShutdownTimeout
// so a stuck DB cannot indefinitely delay process exit.
drainCtx, drainCancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
defer drainCancel()
geoSvc.Drain(drainCtx)
err = errors.Join(err, geoSvc.Close())
}()
userStore := user.NewStore(db)
userCache := user.NewCache()
authStore := auth.NewStore(db)
authCache := auth.NewCache()
if err := authCache.Warm(ctx, authStore); err != nil {
return fmt.Errorf("warm backend auth cache: %w", err)
}
logger.Info("auth cache warmed", zap.Int("active_sessions", authCache.Size()))
	// user.Service depends on auth.Service through SessionRevoker, while
	// auth.Service depends back on user.Service; the lobby/runtime and
	// user/notification pairs are cyclic in the same way. Each cyclic
	// dependency is resolved with a tiny adapter struct whose inner
	// pointer is patched once both services exist.
revoker := &authSessionRevoker{}
lobbyCascade := &lobbyCascadeAdapter{}
userNotifyCascade := &userNotificationCascadeAdapter{}
lobbyNotifyPublisher := &lobbyNotificationPublisherAdapter{}
runtimeNotifyPublisher := &runtimeNotificationPublisherAdapter{}
userSvc := user.NewService(user.Deps{
Store: userStore,
Cache: userCache,
Lobby: lobbyCascade,
Notification: userNotifyCascade,
Geo: geoSvc,
SessionRevoker: revoker,
UserNameMaxRetries: cfg.Auth.UserNameMaxRetries,
Logger: logger,
})
if err := userCache.Warm(ctx, userStore); err != nil {
return fmt.Errorf("warm backend user entitlement cache: %w", err)
}
logger.Info("user entitlement cache warmed", zap.Int("snapshots", userCache.Size()))
mailStore := mail.NewStore(db)
mailSender, err := mail.NewSMTPSender(cfg.SMTP, logger)
if err != nil {
return fmt.Errorf("build mail smtp sender: %w", err)
}
mailSvc := mail.NewService(mail.Deps{
Store: mailStore,
SMTP: mailSender,
Admin: mail.NewNoopAdminNotifier(logger),
Config: cfg.Mail,
Logger: logger,
})
authSvc := auth.NewService(auth.Deps{
Store: authStore,
Cache: authCache,
User: userSvc,
Geo: geoSvc,
Mail: mailSvc,
Push: pushSvc,
Config: cfg.Auth,
Logger: logger,
})
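	// Patch the adapter: both sides of the user/auth cycle now exist.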
revoker.svc = authSvc
adminStore := admin.NewStore(db)
adminCache := admin.NewCache()
if err := admin.Bootstrap(ctx, adminStore, cfg.Admin, logger); err != nil {
return fmt.Errorf("admin bootstrap: %w", err)
}
adminSvc := admin.NewService(admin.Deps{
Store: adminStore,
Cache: adminCache,
Logger: logger,
})
if err := adminCache.Warm(ctx, adminStore); err != nil {
return fmt.Errorf("warm backend admin cache: %w", err)
}
logger.Info("admin cache warmed", zap.Int("admins", adminCache.Size()))
runtimeGateway := &runtimeGatewayAdapter{}
lobbyStore := lobby.NewStore(db)
lobbyCache := lobby.NewCache()
lobbySvc, err := lobby.NewService(lobby.Deps{
Store: lobbyStore,
Cache: lobbyCache,
Runtime: runtimeGateway,
Notification: lobbyNotifyPublisher,
Entitlement: &userEntitlementAdapter{svc: userSvc},
Config: cfg.Lobby,
Logger: logger,
})
if err != nil {
return fmt.Errorf("build backend lobby service: %w", err)
}
if err := lobbyCache.Warm(ctx, lobbyStore); err != nil {
return fmt.Errorf("warm backend lobby cache: %w", err)
}
games, members, raceNames := lobbyCache.Sizes()
logger.Info("lobby cache warmed",
zap.Int("games", games),
zap.Int("memberships", members),
zap.Int("race_names", raceNames),
)
lobbyCascade.svc = lobbySvc
dockerCli, err := mobyclient.New(mobyclient.WithHost(cfg.Docker.Host))
if err != nil {
return fmt.Errorf("build docker client: %w", err)
}
dockerAdapter, err := dockerclient.NewAdapter(dockerclient.AdapterConfig{Docker: dockerCli})
if err != nil {
return fmt.Errorf("build docker adapter: %w", err)
}
if err := dockerAdapter.EnsureNetwork(ctx, cfg.Docker.Network); err != nil {
return fmt.Errorf("docker network %q: %w", cfg.Docker.Network, err)
}
engineCli, err := engineclient.NewClient(engineclient.Config{
CallTimeout: cfg.Engine.CallTimeout,
ProbeTimeout: cfg.Engine.ProbeTimeout,
})
if err != nil {
return fmt.Errorf("build engine client: %w", err)
}
defer func() {
err = errors.Join(err, engineCli.Close())
}()
runtimeStore := runtime.NewStore(db)
runtimeCache := runtime.NewCache()
engineVersionSvc := runtime.NewEngineVersionService(runtimeStore, runtimeCache, nil)
runtimeSvc, err := runtime.NewService(runtime.Deps{
Store: runtimeStore,
Cache: runtimeCache,
EngineVersions: engineVersionSvc,
Docker: dockerAdapter,
Engine: engineCli,
Lobby: &lobbyConsumerAdapter{svc: lobbySvc},
Notification: runtimeNotifyPublisher,
DockerNetwork: cfg.Docker.Network,
HostStateRoot: cfg.Game.StateRoot,
Config: cfg.Runtime,
Logger: logger,
})
if err != nil {
return fmt.Errorf("build runtime service: %w", err)
}
if err := runtimeCache.Warm(ctx, runtimeStore); err != nil {
return fmt.Errorf("warm backend runtime cache: %w", err)
}
rtRecords, rtVersions := runtimeCache.Sizes()
logger.Info("runtime cache warmed",
zap.Int("active_runtimes", rtRecords),
zap.Int("engine_versions", rtVersions),
)
runtimeGateway.svc = runtimeSvc
notifStore := notification.NewStore(db)
notifSvc := notification.NewService(notification.Deps{
Store: notifStore,
Mail: mailSvc,
Push: pushSvc,
Accounts: userSvc,
Config: cfg.Notification,
Logger: logger,
})
userNotifyCascade.svc = notifSvc
lobbyNotifyPublisher.svc = notifSvc
runtimeNotifyPublisher.svc = notifSvc
if email := cfg.Notification.AdminEmail; email == "" {
logger.Info("notification admin email not configured (BACKEND_NOTIFICATION_ADMIN_EMAIL); admin-channel routes will be skipped")
} else {
logger.Info("notification admin email configured", zap.String("admin_email", email))
}
publicAuthHandlers := backendserver.NewPublicAuthHandlers(authSvc, logger)
internalSessionsHandlers := backendserver.NewInternalSessionsHandlers(authSvc, logger)
userAccountHandlers := backendserver.NewUserAccountHandlers(userSvc, logger)
adminUsersHandlers := backendserver.NewAdminUsersHandlers(userSvc, logger)
adminAdminAccountsHandlers := backendserver.NewAdminAdminAccountsHandlers(adminSvc, logger)
internalUsersHandlers := backendserver.NewInternalUsersHandlers(userSvc, logger)
userLobbyGamesHandlers := backendserver.NewUserLobbyGamesHandlers(lobbySvc, logger)
userLobbyApplicationsHandlers := backendserver.NewUserLobbyApplicationsHandlers(lobbySvc, logger)
userLobbyInvitesHandlers := backendserver.NewUserLobbyInvitesHandlers(lobbySvc, logger)
userLobbyMembershipsHandlers := backendserver.NewUserLobbyMembershipsHandlers(lobbySvc, logger)
userLobbyMyHandlers := backendserver.NewUserLobbyMyHandlers(lobbySvc, logger)
userLobbyRaceNamesHandlers := backendserver.NewUserLobbyRaceNamesHandlers(lobbySvc, logger)
adminGamesHandlers := backendserver.NewAdminGamesHandlers(lobbySvc, logger)
adminEngineVersionsHandlers := backendserver.NewAdminEngineVersionsHandlers(engineVersionSvc, logger)
adminRuntimesHandlers := backendserver.NewAdminRuntimesHandlers(runtimeSvc, logger)
adminMailHandlers := backendserver.NewAdminMailHandlers(mailSvc, logger)
adminNotificationsHandlers := backendserver.NewAdminNotificationsHandlers(notifSvc, logger)
adminGeoHandlers := backendserver.NewAdminGeoHandlers(geoSvc, logger)
userGamesHandlers := backendserver.NewUserGamesHandlers(runtimeSvc, engineCli, logger)
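	// ready is handed to the router as its readiness signal: it flips true
	// only once every warm-start cache has finished loading.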
ready := func() bool {
return authCache.Ready() && userCache.Ready() && adminCache.Ready() && lobbyCache.Ready() && runtimeCache.Ready()
}
handler, err := backendserver.NewRouter(backendserver.RouterDependencies{
Logger: logger,
Telemetry: telemetryRT,
Ready: ready,
AdminVerifier: adminSvc,
GeoCounter: geoSvc,
PublicAuth: publicAuthHandlers,
InternalSessions: internalSessionsHandlers,
UserAccount: userAccountHandlers,
AdminUsers: adminUsersHandlers,
AdminAdminAccounts: adminAdminAccountsHandlers,
InternalUsers: internalUsersHandlers,
UserLobbyGames: userLobbyGamesHandlers,
UserLobbyApplications: userLobbyApplicationsHandlers,
UserLobbyInvites: userLobbyInvitesHandlers,
UserLobbyMemberships: userLobbyMembershipsHandlers,
UserLobbyMy: userLobbyMyHandlers,
UserLobbyRaceNames: userLobbyRaceNamesHandlers,
AdminGames: adminGamesHandlers,
AdminRuntimes: adminRuntimesHandlers,
AdminEngineVersions: adminEngineVersionsHandlers,
AdminMail: adminMailHandlers,
AdminNotifications: adminNotificationsHandlers,
AdminGeo: adminGeoHandlers,
UserGames: userGamesHandlers,
})
if err != nil {
return fmt.Errorf("build backend router: %w", err)
}
httpServer := backendserver.NewServer(cfg.HTTP, handler, logger)
pushServer := push.NewServer(cfg.GRPCPush, pushSvc, logger, telemetryRT)
metricsServer := metricsapi.NewServer(telemetryRT.PrometheusListenAddr(), telemetryRT.Handler(), logger)
lobbySweeper := lobby.NewSweeper(lobbySvc)
mailWorker := mail.NewWorker(mailSvc)
notifWorker := notification.NewWorker(notifSvc)
runtimeWorkers := runtimeSvc.Workers()
runtimeScheduler := runtimeSvc.SchedulerComponent()
runtimeReconciler := runtimeSvc.Reconciler()
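	// A single supervisor owns every long-running component: app.Run starts
	// them all, blocks until ctx is cancelled, then shuts them down within
	// cfg.ShutdownTimeout.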
components := []app.Component{httpServer, pushServer, mailWorker, notifWorker, lobbySweeper, runtimeWorkers, runtimeScheduler, runtimeReconciler}
if metricsServer.Enabled() {
components = append(components, metricsServer)
}
logger.Info("backend application starting",
zap.String("http_addr", cfg.HTTP.Addr),
zap.String("grpc_push_addr", cfg.GRPCPush.Addr),
zap.String("traces_exporter", cfg.Telemetry.TracesExporter),
zap.String("metrics_exporter", cfg.Telemetry.MetricsExporter),
zap.String("prometheus_addr", telemetryRT.PrometheusListenAddr()),
)
return app.New(cfg.ShutdownTimeout, components...).Run(ctx)
}
// authSessionRevoker adapts `*auth.Service.RevokeAllForUser` to the
// `user.SessionRevoker` interface (which returns only an error, while
// auth's method also returns the slice of revoked sessions). The svc
// field is patched by the caller after both services have been
// constructed — auth.Service depends on user.Service through
// `UserEnsurer`, while user.Service depends on auth.Service through
// `SessionRevoker`. Wiring the adapter struct first and patching the
// pointer afterwards breaks the cycle without introducing a third
// package.
type authSessionRevoker struct {
svc *auth.Service
}
func (r *authSessionRevoker) RevokeAllForUser(ctx context.Context, userID uuid.UUID) error {
if r == nil || r.svc == nil {
return nil
}
_, err := r.svc.RevokeAllForUser(ctx, userID)
return err
}
// lobbyCascadeAdapter adapts `*lobby.Service` to the
// `user.LobbyCascade` interface. The svc field is patched after both
// services have been constructed — same dependency-cycle pattern as
// authSessionRevoker.
type lobbyCascadeAdapter struct {
svc *lobby.Service
}
func (a *lobbyCascadeAdapter) OnUserBlocked(ctx context.Context, userID uuid.UUID) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.OnUserBlocked(ctx, userID)
}
func (a *lobbyCascadeAdapter) OnUserDeleted(ctx context.Context, userID uuid.UUID) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.OnUserDeleted(ctx, userID)
}
// userEntitlementAdapter adapts `*user.Service.GetEntitlementSnapshot`
// to the `lobby.EntitlementProvider` interface. Lobby reads the
// `MaxRegisteredRaceNames` field at race-name registration time to
// enforce the per-tier quota.
type userEntitlementAdapter struct {
svc *user.Service
}
func (a *userEntitlementAdapter) GetMaxRegisteredRaceNames(ctx context.Context, userID uuid.UUID) (int32, error) {
	if a == nil || a.svc == nil {
		// Unwired adapter: fall back to the most conservative quota of
		// a single race name.
		return 1, nil
	}
snap, err := a.svc.GetEntitlementSnapshot(ctx, userID)
if err != nil {
return 0, err
}
return snap.MaxRegisteredRaceNames, nil
}
// runtimeGatewayAdapter implements `lobby.RuntimeGateway` by
// delegating to `*runtime.Service`. The svc pointer is patched after
// the services are constructed — runtime depends on lobby
// (LobbyConsumer), so we wire the adapter first and patch it once
// runtimeSvc exists.
type runtimeGatewayAdapter struct {
svc *runtime.Service
}
func (a *runtimeGatewayAdapter) StartGame(ctx context.Context, gameID uuid.UUID) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.StartGame(ctx, gameID)
}
func (a *runtimeGatewayAdapter) StopGame(ctx context.Context, gameID uuid.UUID) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.StopGame(ctx, gameID)
}
func (a *runtimeGatewayAdapter) PauseGame(ctx context.Context, gameID uuid.UUID) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.PauseGame(ctx, gameID)
}
func (a *runtimeGatewayAdapter) ResumeGame(ctx context.Context, gameID uuid.UUID) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.ResumeGame(ctx, gameID)
}
// lobbyConsumerAdapter implements `runtime.LobbyConsumer` by
// translating runtime DTOs into the lobby package's vocabulary.
type lobbyConsumerAdapter struct {
svc *lobby.Service
}
func (a *lobbyConsumerAdapter) OnRuntimeSnapshot(ctx context.Context, gameID uuid.UUID, snapshot runtime.LobbySnapshot) error {
if a == nil || a.svc == nil {
return nil
}
stats := make([]lobby.PlayerTurnStats, 0, len(snapshot.PlayerStats))
for _, s := range snapshot.PlayerStats {
stats = append(stats, lobby.PlayerTurnStats{
UserID: s.UserID,
InitialPlanets: s.InitialPlanets,
InitialPopulation: s.InitialPopulation,
CurrentPlanets: s.CurrentPlanets,
CurrentPopulation: s.CurrentPopulation,
MaxPlanets: s.MaxPlanets,
MaxPopulation: s.MaxPopulation,
})
}
return a.svc.OnRuntimeSnapshot(ctx, gameID, lobby.RuntimeSnapshot{
CurrentTurn: snapshot.CurrentTurn,
RuntimeStatus: snapshot.RuntimeStatus,
EngineHealth: snapshot.EngineHealth,
ObservedAt: snapshot.ObservedAt,
PlayerStats: stats,
})
}
func (a *lobbyConsumerAdapter) OnRuntimeJobResult(ctx context.Context, gameID uuid.UUID, result runtime.JobResult) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.OnRuntimeJobResult(ctx, gameID, lobby.RuntimeJobResult{
Op: result.Op,
Status: result.Status,
Message: result.Message,
})
}
// userNotificationCascadeAdapter implements
// `user.NotificationCascade` by delegating to `*notification.Service`.
// Construction order: user.Service depends on the cascade and is
// built before notification.Service. The svc pointer is patched once
// notifSvc exists.
type userNotificationCascadeAdapter struct {
svc *notification.Service
}
func (a *userNotificationCascadeAdapter) OnUserDeleted(ctx context.Context, userID uuid.UUID) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.OnUserDeleted(ctx, userID)
}
// lobbyNotificationPublisherAdapter implements
// `lobby.NotificationPublisher` by translating each LobbyNotification
// into a notification.Intent through the publisher Adapter exposed by
// notification.Service.
type lobbyNotificationPublisherAdapter struct {
svc *notification.Service
}
func (a *lobbyNotificationPublisherAdapter) PublishLobbyEvent(ctx context.Context, ev lobby.LobbyNotification) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.LobbyAdapter().PublishLobbyEvent(ctx, ev)
}
// runtimeNotificationPublisherAdapter implements
// `runtime.NotificationPublisher` by delegating to the runtime adapter
// exposed by notification.Service.
type runtimeNotificationPublisherAdapter struct {
svc *notification.Service
}
func (a *runtimeNotificationPublisherAdapter) PublishRuntimeEvent(ctx context.Context, kind, idempotencyKey string, payload map[string]any) error {
if a == nil || a.svc == nil {
return nil
}
return a.svc.RuntimeAdapter().PublishRuntimeEvent(ctx, kind, idempotencyKey, payload)
}
@@ -0,0 +1,199 @@
// Command jetgen regenerates the go-jet/v2 query-builder code under
// galaxy/backend/internal/postgres/jet/ against a transient PostgreSQL
// instance.
//
// Invoke as `go run ./cmd/jetgen` (or via the `make jet` target) from inside
// `galaxy/backend`. The tool is not part of the runtime binary.
//
// Steps:
//
// 1. start a postgres:16-alpine container via testcontainers-go
// 2. open it through galaxy/postgres with search_path=backend
// 3. ensure the backend schema exists, then apply the embedded goose
// migrations
// 4. run jet's PostgreSQL generator against schema=backend, writing into
// ../internal/postgres/jet
package main

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"log"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"galaxy/backend/internal/postgres/migrations"
	"galaxy/postgres"

	jetpostgres "github.com/go-jet/jet/v2/generator/postgres"
	testcontainers "github.com/testcontainers/testcontainers-go"
	tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
)
const (
postgresImage = "postgres:16-alpine"
superuserName = "galaxy"
superuserPassword = "galaxy"
superuserDatabase = "galaxy_backend"
backendSchema = "backend"
containerStartup = 90 * time.Second
defaultOpTimeout = 10 * time.Second
jetOutputDirSuffix = "internal/postgres/jet"
)
func main() {
if err := run(context.Background()); err != nil {
log.Fatalf("jetgen: %v", err)
}
}
func run(ctx context.Context) error {
outputDir, err := jetOutputDir()
if err != nil {
return err
}
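	// Step 1: start the throwaway postgres:16-alpine container.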
container, err := tcpostgres.Run(ctx, postgresImage,
tcpostgres.WithDatabase(superuserDatabase),
tcpostgres.WithUsername(superuserName),
tcpostgres.WithPassword(superuserPassword),
testcontainers.WithWaitStrategy(
wait.ForLog("database system is ready to accept connections").
WithOccurrence(2).
WithStartupTimeout(containerStartup),
),
)
if err != nil {
return fmt.Errorf("start postgres container: %w", err)
}
defer func() {
if termErr := testcontainers.TerminateContainer(container); termErr != nil {
log.Printf("jetgen: terminate container: %v", termErr)
}
}()
baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
if err != nil {
return fmt.Errorf("resolve container dsn: %w", err)
}
scopedDSN, err := dsnWithSearchPath(baseDSN, backendSchema)
if err != nil {
return err
}
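	// Steps 2-3: open the pool with search_path=backend, ensure the schema
	// exists, and apply the embedded goose migrations.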
if err := applyMigrations(ctx, scopedDSN); err != nil {
return err
}
	// jet's ProcessSchema wipes only <outputDir>/<schema> on every run, so
	// package metadata kept directly under outputDir (e.g. jet.go) survives.
	// We only ensure the parent directory exists so the first run on a
	// fresh checkout does not fail with ENOENT.
if err := os.MkdirAll(outputDir, 0o755); err != nil {
return fmt.Errorf("ensure jet output dir: %w", err)
}
jetDB, err := openScoped(ctx, scopedDSN)
if err != nil {
return fmt.Errorf("open scoped pool for jet generation: %w", err)
}
defer func() { _ = jetDB.Close() }()
// Drop goose's bookkeeping table inside the schema-scoped connection so
// jet does not generate code for it. The table is recreated on the next
// migration run; jetgen never reuses the container.
if _, err := jetDB.ExecContext(ctx, "DROP TABLE IF EXISTS goose_db_version"); err != nil {
return fmt.Errorf("drop goose_db_version: %w", err)
}
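	// Step 4: run jet's PostgreSQL generator against schema=backend.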
if err := jetpostgres.GenerateDB(jetDB, backendSchema, outputDir); err != nil {
return fmt.Errorf("jet generate: %w", err)
}
log.Printf("jetgen: generated jet code into %s (schema=%s)", outputDir, backendSchema)
return nil
}
// dsnWithSearchPath rewrites the connection string so each new connection
// pins search_path to the named schema. The schema must exist before the
// first query that depends on search_path resolution; ensureSchema handles
// that on the migration path.
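//
// For example (hypothetical container address):
//
//	postgres://galaxy:galaxy@127.0.0.1:55432/galaxy_backend?sslmode=disable
//	-> postgres://galaxy:galaxy@127.0.0.1:55432/galaxy_backend?search_path=backend&sslmode=disable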
func dsnWithSearchPath(baseDSN, schema string) (string, error) {
parsed, err := url.Parse(baseDSN)
if err != nil {
return "", fmt.Errorf("parse base dsn: %w", err)
}
values := parsed.Query()
values.Set("search_path", schema)
if values.Get("sslmode") == "" {
values.Set("sslmode", "disable")
}
parsed.RawQuery = values.Encode()
return parsed.String(), nil
}
func applyMigrations(ctx context.Context, dsn string) error {
db, err := openScoped(ctx, dsn)
if err != nil {
return fmt.Errorf("open scoped pool: %w", err)
}
defer func() { _ = db.Close() }()
if err := postgres.Ping(ctx, db, defaultOpTimeout); err != nil {
return err
}
if err := ensureSchema(ctx, db, backendSchema); err != nil {
return err
}
if err := postgres.RunMigrations(ctx, db, migrations.Migrations(), "."); err != nil {
return fmt.Errorf("run migrations: %w", err)
}
return nil
}
// ensureSchema creates the named schema when it is absent. The statement is
// idempotent and unaffected by search_path; it must run before goose creates
// its bookkeeping table, because that table lands in the search_path-scoped
// backend schema, which has to exist first.
func ensureSchema(ctx context.Context, db *sql.DB, schema string) error {
stmt := fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %s", quoteIdent(schema))
if _, err := db.ExecContext(ctx, stmt); err != nil {
return fmt.Errorf("ensure schema %q: %w", schema, err)
}
return nil
}
func openScoped(ctx context.Context, dsn string) (*sql.DB, error) {
cfg := postgres.DefaultConfig()
cfg.PrimaryDSN = dsn
cfg.OperationTimeout = defaultOpTimeout
return postgres.OpenPrimary(ctx, cfg)
}
// jetOutputDir returns the absolute path that jet should write into. The path
// is anchored to galaxy/backend via runtime.Caller so the tool can be
// invoked from any working directory.
func jetOutputDir() (string, error) {
_, file, _, ok := runtime.Caller(0)
if !ok {
return "", errors.New("resolve runtime caller for jet output path")
}
dir := filepath.Dir(file)
// dir = .../galaxy/backend/cmd/jetgen
moduleRoot := filepath.Clean(filepath.Join(dir, "..", ".."))
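	// moduleRoot = .../galaxy/backend, so jet writes into
	// .../galaxy/backend/internal/postgres/jet.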
return filepath.Join(moduleRoot, jetOutputDirSuffix), nil
}
// quoteIdent quotes a SQL identifier by doubling embedded quote characters.
// jetgen uses a fixed schema name, but quoting keeps the helper safe to reuse
// if the constant ever changes to a configurable value.
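// For example, quoteIdent(`back"end`) returns `"back""end"`.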
func quoteIdent(name string) string {
return `"` + strings.ReplaceAll(name, `"`, `""`) + `"`
}