edc9709bd6
After a host reboot macOS clears /private/tmp, so the per-game bind-mount source under /tmp/galaxy-game-state/<uuid> vanishes and Docker refuses to restart the long-lived engine container under `restart: unless-stopped`. The container then sits in the `exited` state and the dev sandbox is unreachable until the developer manually removes it and runs `make up` twice.

Fix `make -C tools/local-dev up` to heal this in one cycle:

1. `prune-broken-engines` (a new make target wired into `up`; sketched below) walks every container labelled `galaxy-game-engine` and removes the ones not in the `running` / `restarting` state. Healthy long-lived containers survive normal up/down cycles untouched.

2. The backend now runs a single reconciliation pass before the dev-sandbox bootstrap (`Reconciler().Tick(ctx)` in main.go). Without it, bootstrap would reuse the soon-to-be-cancelled game that the periodic ticker is about to mark `removed`. The pre-tick cascades the orphan runtime row through markRemoved → lobby cancel before bootstrap purges terminal sandbox games and creates a fresh one, so a single `make up` lands a working sandbox with a brand-new state directory.

The README troubleshooting section documents the symptom and the recovery so the bind-mount-source error message is greppable.

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
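For reference, a minimal sketch of the shape of the prune target (illustrative only: the exact label filter and wiring live in tools/local-dev/Makefile, and GNU xargs would additionally want `-r` to tolerate an empty match list; BSD xargs on macOS skips empty input by default):

    prune-broken-engines:
    	docker ps -a --filter label=galaxy-game-engine --format '{{.ID}} {{.State}}' \
    		| awk '$$2 != "running" && $$2 != "restarting" { print $$1 }' \
    		| xargs docker rm -f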
582 lines
20 KiB
Go
// Command backend boots the Galaxy backend process. It loads configuration,
// initialises telemetry and the structured logger, opens the Postgres pool,
// applies embedded migrations, and runs the HTTP, gRPC push, and (optional)
// Prometheus metrics listeners until SIGINT or SIGTERM triggers an orderly
// shutdown.
package main

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/signal"
	"syscall"

	// time/tzdata embeds the IANA timezone database so time.LoadLocation
	// works in container images without /usr/share/zoneinfo (distroless
	// static, alpine without the tzdata apk). The auth and user-settings
	// flows validate the caller's `time_zone` via time.LoadLocation;
	// without this import only "UTC" and fixed offsets would resolve.
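	// With it, e.g. time.LoadLocation("Europe/Berlin") succeeds even in
	// a FROM-scratch image, at the cost of a few hundred kilobytes of
	// binary size.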
	_ "time/tzdata"

	"galaxy/backend/internal/admin"
	"galaxy/backend/internal/app"
	"galaxy/backend/internal/auth"
	"galaxy/backend/internal/config"
	"galaxy/backend/internal/devsandbox"
	"galaxy/backend/internal/dockerclient"
	"galaxy/backend/internal/engineclient"
	"galaxy/backend/internal/geo"
	"galaxy/backend/internal/lobby"
	"galaxy/backend/internal/logging"
	"galaxy/backend/internal/mail"
	"galaxy/backend/internal/metricsapi"
	"galaxy/backend/internal/notification"
	backendpostgres "galaxy/backend/internal/postgres"
	"galaxy/backend/internal/runtime"
	backendserver "galaxy/backend/internal/server"
	"galaxy/backend/internal/telemetry"
	"galaxy/backend/internal/user"
	"galaxy/backend/push"

	mobyclient "github.com/moby/moby/client"

	"github.com/google/uuid"
	"go.uber.org/zap"
)

func main() {
	ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer cancel()

	if err := run(ctx); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

func run(ctx context.Context) (err error) {
	cfg, err := config.LoadFromEnv()
	if err != nil {
		return fmt.Errorf("load backend config: %w", err)
	}

	logger, err := logging.New(cfg.Logging)
	if err != nil {
		return fmt.Errorf("build backend logger: %w", err)
	}
	defer func() {
		err = errors.Join(err, logging.Sync(logger))
	}()

	telemetryRT, err := telemetry.New(ctx, logger, cfg.Telemetry)
	if err != nil {
		return fmt.Errorf("build backend telemetry: %w", err)
	}
	defer func() {
		shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
		defer shutdownCancel()
		err = errors.Join(err, telemetryRT.Shutdown(shutdownCtx))
	}()

	db, err := backendpostgres.Open(ctx, cfg.Postgres, telemetryRT)
	if err != nil {
		return fmt.Errorf("open backend postgres pool: %w", err)
	}
	defer func() {
		err = errors.Join(err, db.Close())
	}()

	if err := backendpostgres.ApplyMigrations(ctx, db); err != nil {
		return fmt.Errorf("apply backend migrations: %w", err)
	}

	pushSvc, err := push.NewService(push.ServiceConfig{FreshnessWindow: cfg.FreshnessWindow}, logger, telemetryRT)
	if err != nil {
		return fmt.Errorf("build backend push service: %w", err)
	}

	geoSvc, err := geo.NewService(cfg.GeoIP.DBPath, db)
	if err != nil {
		return fmt.Errorf("build backend geo service: %w", err)
	}
	geoSvc.SetLogger(logger)
	defer func() {
		// Drain pending counter goroutines first so their upserts run
		// against a still-open Postgres pool, then release the
		// GeoLite2 resolver. Drain is bounded by cfg.ShutdownTimeout
		// so a stuck DB cannot indefinitely delay process exit.
		drainCtx, drainCancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
		defer drainCancel()
		geoSvc.Drain(drainCtx)
		err = errors.Join(err, geoSvc.Close())
	}()

	userStore := user.NewStore(db)
	userCache := user.NewCache()

	authStore := auth.NewStore(db)
	authCache := auth.NewCache()
	if err := authCache.Warm(ctx, authStore); err != nil {
		return fmt.Errorf("warm backend auth cache: %w", err)
	}
	logger.Info("auth cache warmed", zap.Int("active_sessions", authCache.Size()))

	// user.Service depends on auth.Service through SessionRevoker,
	// while auth.Service depends on user.Service through UserEnsurer;
	// user.Service and lobby.Service are similarly entangled through
	// the lobby cascade path. Each cyclic dependency is resolved with
	// a tiny adapter struct whose inner pointer is patched once both
	// services exist.
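	// For example, `revoker` starts out with a nil inner pointer, is
	// handed to user.NewService below, and is only completed by the
	// `revoker.svc = authSvc` assignment after auth.NewService returns;
	// until then its nil check makes session revocation a no-op.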
	revoker := &authSessionRevoker{}
	lobbyCascade := &lobbyCascadeAdapter{}
	userNotifyCascade := &userNotificationCascadeAdapter{}
	lobbyNotifyPublisher := &lobbyNotificationPublisherAdapter{}
	runtimeNotifyPublisher := &runtimeNotificationPublisherAdapter{}

	userSvc := user.NewService(user.Deps{
		Store:              userStore,
		Cache:              userCache,
		Lobby:              lobbyCascade,
		Notification:       userNotifyCascade,
		Geo:                geoSvc,
		SessionRevoker:     revoker,
		UserNameMaxRetries: cfg.Auth.UserNameMaxRetries,
		Logger:             logger,
	})
	if err := userCache.Warm(ctx, userStore); err != nil {
		return fmt.Errorf("warm backend user entitlement cache: %w", err)
	}
	logger.Info("user entitlement cache warmed", zap.Int("snapshots", userCache.Size()))

	mailStore := mail.NewStore(db)
	mailSender, err := mail.NewSMTPSender(cfg.SMTP, logger)
	if err != nil {
		return fmt.Errorf("build mail smtp sender: %w", err)
	}
	mailSvc := mail.NewService(mail.Deps{
		Store:  mailStore,
		SMTP:   mailSender,
		Admin:  mail.NewNoopAdminNotifier(logger),
		Config: cfg.Mail,
		Logger: logger,
	})

	authSvc := auth.NewService(auth.Deps{
		Store:  authStore,
		Cache:  authCache,
		User:   userSvc,
		Geo:    geoSvc,
		Mail:   mailSvc,
		Push:   pushSvc,
		Config: cfg.Auth,
		Logger: logger,
	})
	revoker.svc = authSvc

	adminStore := admin.NewStore(db)
	adminCache := admin.NewCache()
	if err := admin.Bootstrap(ctx, adminStore, cfg.Admin, logger); err != nil {
		return fmt.Errorf("admin bootstrap: %w", err)
	}
	adminSvc := admin.NewService(admin.Deps{
		Store:  adminStore,
		Cache:  adminCache,
		Logger: logger,
	})
	if err := adminCache.Warm(ctx, adminStore); err != nil {
		return fmt.Errorf("warm backend admin cache: %w", err)
	}
	logger.Info("admin cache warmed", zap.Int("admins", adminCache.Size()))

	runtimeGateway := &runtimeGatewayAdapter{}
	lobbyStore := lobby.NewStore(db)
	lobbyCache := lobby.NewCache()
	lobbySvc, err := lobby.NewService(lobby.Deps{
		Store:        lobbyStore,
		Cache:        lobbyCache,
		Runtime:      runtimeGateway,
		Notification: lobbyNotifyPublisher,
		Entitlement:  &userEntitlementAdapter{svc: userSvc},
		Config:       cfg.Lobby,
		Logger:       logger,
	})
	if err != nil {
		return fmt.Errorf("build backend lobby service: %w", err)
	}
	if err := lobbyCache.Warm(ctx, lobbyStore); err != nil {
		return fmt.Errorf("warm backend lobby cache: %w", err)
	}
	games, members, raceNames := lobbyCache.Sizes()
	logger.Info("lobby cache warmed",
		zap.Int("games", games),
		zap.Int("memberships", members),
		zap.Int("race_names", raceNames),
	)
	lobbyCascade.svc = lobbySvc

	dockerCli, err := mobyclient.New(mobyclient.WithHost(cfg.Docker.Host))
	if err != nil {
		return fmt.Errorf("build docker client: %w", err)
	}
	dockerAdapter, err := dockerclient.NewAdapter(dockerclient.AdapterConfig{Docker: dockerCli})
	if err != nil {
		return fmt.Errorf("build docker adapter: %w", err)
	}
	if err := dockerAdapter.EnsureNetwork(ctx, cfg.Docker.Network); err != nil {
		return fmt.Errorf("docker network %q: %w", cfg.Docker.Network, err)
	}
	engineCli, err := engineclient.NewClient(engineclient.Config{
		CallTimeout:  cfg.Engine.CallTimeout,
		ProbeTimeout: cfg.Engine.ProbeTimeout,
	})
	if err != nil {
		return fmt.Errorf("build engine client: %w", err)
	}
	defer func() {
		err = errors.Join(err, engineCli.Close())
	}()

	runtimeStore := runtime.NewStore(db)
	runtimeCache := runtime.NewCache()
	engineVersionSvc := runtime.NewEngineVersionService(runtimeStore, runtimeCache, nil)
	runtimeSvc, err := runtime.NewService(runtime.Deps{
		Store:          runtimeStore,
		Cache:          runtimeCache,
		EngineVersions: engineVersionSvc,
		Docker:         dockerAdapter,
		Engine:         engineCli,
		Lobby:          &lobbyConsumerAdapter{svc: lobbySvc},
		Notification:   runtimeNotifyPublisher,
		DockerNetwork:  cfg.Docker.Network,
		HostStateRoot:  cfg.Game.StateRoot,
		Config:         cfg.Runtime,
		Logger:         logger,
	})
	if err != nil {
		return fmt.Errorf("build runtime service: %w", err)
	}
	if err := runtimeCache.Warm(ctx, runtimeStore); err != nil {
		return fmt.Errorf("warm backend runtime cache: %w", err)
	}
	rtRecords, rtVersions := runtimeCache.Sizes()
	logger.Info("runtime cache warmed",
		zap.Int("active_runtimes", rtRecords),
		zap.Int("engine_versions", rtVersions),
	)
	runtimeGateway.svc = runtimeSvc

	// Run a single reconciliation pass before the dev-sandbox
	// bootstrap so any runtime row pointing at a vanished engine
	// container (host reboot wiped /tmp/galaxy-game-state/<uuid>;
	// `tools/local-dev`'s `prune-broken-engines` target reaped the
	// husk) is already cascaded through `markRemoved` → lobby
	// `cancelled` by the time the bootstrap walks the sandbox list.
	// Without this pre-tick the bootstrap would reuse the
	// soon-to-be-cancelled game and force the developer into a
	// second `make up` cycle to land a healthy sandbox. Failures are
	// non-fatal: the periodic ticker started later catches up, and
	// the worst case degrades to the legacy two-cycle recovery.
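	// The runtimeReconciler registered as an app component further
	// down appears to reuse this same reconciliation pass, so the
	// pre-tick only changes when the first pass runs, not what runs.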
	if err := runtimeSvc.Reconciler().Tick(ctx); err != nil {
		logger.Warn("pre-bootstrap reconciler tick failed", zap.Error(err))
	}

	if err := devsandbox.Bootstrap(ctx, devsandbox.Deps{
		Users:          userSvc,
		Lobby:          lobbySvc,
		EngineVersions: engineVersionSvc,
	}, cfg.DevSandbox, logger); err != nil {
		return fmt.Errorf("dev sandbox bootstrap: %w", err)
	}

	notifStore := notification.NewStore(db)
	notifSvc := notification.NewService(notification.Deps{
		Store:    notifStore,
		Mail:     mailSvc,
		Push:     pushSvc,
		Accounts: userSvc,
		Config:   cfg.Notification,
		Logger:   logger,
	})
	userNotifyCascade.svc = notifSvc
	lobbyNotifyPublisher.svc = notifSvc
	runtimeNotifyPublisher.svc = notifSvc
	if email := cfg.Notification.AdminEmail; email == "" {
		logger.Info("notification admin email not configured (BACKEND_NOTIFICATION_ADMIN_EMAIL); admin-channel routes will be skipped")
	} else {
		logger.Info("notification admin email configured", zap.String("admin_email", email))
	}

	publicAuthHandlers := backendserver.NewPublicAuthHandlers(authSvc, logger)
	internalSessionsHandlers := backendserver.NewInternalSessionsHandlers(authSvc, logger)
	userSessionsHandlers := backendserver.NewUserSessionsHandlers(authSvc, logger)
	userAccountHandlers := backendserver.NewUserAccountHandlers(userSvc, logger)
	adminUsersHandlers := backendserver.NewAdminUsersHandlers(userSvc, logger)
	adminAdminAccountsHandlers := backendserver.NewAdminAdminAccountsHandlers(adminSvc, logger)
	internalUsersHandlers := backendserver.NewInternalUsersHandlers(userSvc, logger)

	userLobbyGamesHandlers := backendserver.NewUserLobbyGamesHandlers(lobbySvc, logger)
	userLobbyApplicationsHandlers := backendserver.NewUserLobbyApplicationsHandlers(lobbySvc, logger)
	userLobbyInvitesHandlers := backendserver.NewUserLobbyInvitesHandlers(lobbySvc, logger)
	userLobbyMembershipsHandlers := backendserver.NewUserLobbyMembershipsHandlers(lobbySvc, logger)
	userLobbyMyHandlers := backendserver.NewUserLobbyMyHandlers(lobbySvc, logger)
	userLobbyRaceNamesHandlers := backendserver.NewUserLobbyRaceNamesHandlers(lobbySvc, logger)
	adminGamesHandlers := backendserver.NewAdminGamesHandlers(lobbySvc, logger)
	adminEngineVersionsHandlers := backendserver.NewAdminEngineVersionsHandlers(engineVersionSvc, logger)
	adminRuntimesHandlers := backendserver.NewAdminRuntimesHandlers(runtimeSvc, logger)
	adminMailHandlers := backendserver.NewAdminMailHandlers(mailSvc, logger)
	adminNotificationsHandlers := backendserver.NewAdminNotificationsHandlers(notifSvc, logger)
	adminGeoHandlers := backendserver.NewAdminGeoHandlers(geoSvc, logger)
	userGamesHandlers := backendserver.NewUserGamesHandlers(runtimeSvc, engineCli, logger)

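	// ready aggregates the Ready() flags of every cache warmed above.
	// It is handed to the router as RouterDependencies.Ready, most
	// likely backing the readiness probe so no traffic is served from
	// cold caches; the endpoint wiring itself lives in backendserver.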
	ready := func() bool {
		return authCache.Ready() && userCache.Ready() && adminCache.Ready() && lobbyCache.Ready() && runtimeCache.Ready()
	}

	handler, err := backendserver.NewRouter(backendserver.RouterDependencies{
		Logger:                logger,
		Telemetry:             telemetryRT,
		Ready:                 ready,
		AdminVerifier:         adminSvc,
		GeoCounter:            geoSvc,
		PublicAuth:            publicAuthHandlers,
		InternalSessions:      internalSessionsHandlers,
		UserSessions:          userSessionsHandlers,
		UserAccount:           userAccountHandlers,
		AdminUsers:            adminUsersHandlers,
		AdminAdminAccounts:    adminAdminAccountsHandlers,
		InternalUsers:         internalUsersHandlers,
		UserLobbyGames:        userLobbyGamesHandlers,
		UserLobbyApplications: userLobbyApplicationsHandlers,
		UserLobbyInvites:      userLobbyInvitesHandlers,
		UserLobbyMemberships:  userLobbyMembershipsHandlers,
		UserLobbyMy:           userLobbyMyHandlers,
		UserLobbyRaceNames:    userLobbyRaceNamesHandlers,
		AdminGames:            adminGamesHandlers,
		AdminRuntimes:         adminRuntimesHandlers,
		AdminEngineVersions:   adminEngineVersionsHandlers,
		AdminMail:             adminMailHandlers,
		AdminNotifications:    adminNotificationsHandlers,
		AdminGeo:              adminGeoHandlers,
		UserGames:             userGamesHandlers,
	})
	if err != nil {
		return fmt.Errorf("build backend router: %w", err)
	}

	httpServer := backendserver.NewServer(cfg.HTTP, handler, logger)
	pushServer := push.NewServer(cfg.GRPCPush, pushSvc, logger, telemetryRT)
	metricsServer := metricsapi.NewServer(telemetryRT.PrometheusListenAddr(), telemetryRT.Handler(), logger)
	lobbySweeper := lobby.NewSweeper(lobbySvc)
	mailWorker := mail.NewWorker(mailSvc)
	notifWorker := notification.NewWorker(notifSvc)
	runtimeWorkers := runtimeSvc.Workers()
	runtimeScheduler := runtimeSvc.SchedulerComponent()
	runtimeReconciler := runtimeSvc.Reconciler()

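	// Each value below implements app.Component; judging by the
	// app.New(cfg.ShutdownTimeout, components...).Run(ctx) call at the
	// bottom of run, the app package runs the components until ctx is
	// cancelled and then allows cfg.ShutdownTimeout for them to stop.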
	components := []app.Component{httpServer, pushServer, mailWorker, notifWorker, lobbySweeper, runtimeWorkers, runtimeScheduler, runtimeReconciler}
	if metricsServer.Enabled() {
		components = append(components, metricsServer)
	}

	logger.Info("backend application starting",
		zap.String("http_addr", cfg.HTTP.Addr),
		zap.String("grpc_push_addr", cfg.GRPCPush.Addr),
		zap.String("traces_exporter", cfg.Telemetry.TracesExporter),
		zap.String("metrics_exporter", cfg.Telemetry.MetricsExporter),
		zap.String("prometheus_addr", telemetryRT.PrometheusListenAddr()),
	)

	return app.New(cfg.ShutdownTimeout, components...).Run(ctx)
}

// authSessionRevoker adapts `*auth.Service.RevokeAllForUser` to the
// `user.SessionRevoker` interface (which returns only an error, while
// auth's method also returns the slice of revoked sessions). The svc
// field is patched by the caller after both services have been
// constructed — auth.Service depends on user.Service through
// `UserEnsurer`, while user.Service depends on auth.Service through
// `SessionRevoker`. Wiring the adapter struct first and patching the
// pointer afterwards breaks the cycle without introducing a third
// package.
type authSessionRevoker struct {
	svc *auth.Service
}

func (r *authSessionRevoker) RevokeAllForUser(ctx context.Context, userID uuid.UUID, actor user.SessionRevokeActor) error {
	if r == nil || r.svc == nil {
		return nil
	}
	_, err := r.svc.RevokeAllForUser(ctx, userID, auth.RevokeContext{
		ActorKind: auth.ActorKind(actor.Kind),
		ActorID:   actor.ID,
		Reason:    actor.Reason,
	})
	return err
}

// lobbyCascadeAdapter adapts `*lobby.Service` to the
// `user.LobbyCascade` interface. The svc field is patched after both
// services have been constructed — same dependency-cycle pattern as
// authSessionRevoker.
type lobbyCascadeAdapter struct {
	svc *lobby.Service
}

func (a *lobbyCascadeAdapter) OnUserBlocked(ctx context.Context, userID uuid.UUID) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.OnUserBlocked(ctx, userID)
}

func (a *lobbyCascadeAdapter) OnUserDeleted(ctx context.Context, userID uuid.UUID) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.OnUserDeleted(ctx, userID)
}

// userEntitlementAdapter adapts `*user.Service.GetEntitlementSnapshot`
// to the `lobby.EntitlementProvider` interface. Lobby reads the
// `MaxRegisteredRaceNames` field at race-name registration time to
// enforce the per-tier quota.
type userEntitlementAdapter struct {
	svc *user.Service
}

func (a *userEntitlementAdapter) GetMaxRegisteredRaceNames(ctx context.Context, userID uuid.UUID) (int32, error) {
	if a == nil || a.svc == nil {
		return 1, nil
	}
	snap, err := a.svc.GetEntitlementSnapshot(ctx, userID)
	if err != nil {
		return 0, err
	}
	return snap.MaxRegisteredRaceNames, nil
}

// runtimeGatewayAdapter implements `lobby.RuntimeGateway` by
// delegating to `*runtime.Service`. The svc pointer is patched after
// the services are constructed — runtime depends on lobby
// (LobbyConsumer), so we wire the adapter first and patch it once
// runtimeSvc exists.
type runtimeGatewayAdapter struct {
	svc *runtime.Service
}

func (a *runtimeGatewayAdapter) StartGame(ctx context.Context, gameID uuid.UUID) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.StartGame(ctx, gameID)
}

func (a *runtimeGatewayAdapter) StopGame(ctx context.Context, gameID uuid.UUID) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.StopGame(ctx, gameID)
}

func (a *runtimeGatewayAdapter) PauseGame(ctx context.Context, gameID uuid.UUID) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.PauseGame(ctx, gameID)
}

func (a *runtimeGatewayAdapter) ResumeGame(ctx context.Context, gameID uuid.UUID) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.ResumeGame(ctx, gameID)
}

// lobbyConsumerAdapter implements `runtime.LobbyConsumer` by
// translating runtime DTOs into the lobby package's vocabulary.
type lobbyConsumerAdapter struct {
	svc *lobby.Service
}

func (a *lobbyConsumerAdapter) OnRuntimeSnapshot(ctx context.Context, gameID uuid.UUID, snapshot runtime.LobbySnapshot) error {
	if a == nil || a.svc == nil {
		return nil
	}
	stats := make([]lobby.PlayerTurnStats, 0, len(snapshot.PlayerStats))
	for _, s := range snapshot.PlayerStats {
		stats = append(stats, lobby.PlayerTurnStats{
			UserID:            s.UserID,
			InitialPlanets:    s.InitialPlanets,
			InitialPopulation: s.InitialPopulation,
			CurrentPlanets:    s.CurrentPlanets,
			CurrentPopulation: s.CurrentPopulation,
			MaxPlanets:        s.MaxPlanets,
			MaxPopulation:     s.MaxPopulation,
		})
	}
	return a.svc.OnRuntimeSnapshot(ctx, gameID, lobby.RuntimeSnapshot{
		CurrentTurn:   snapshot.CurrentTurn,
		RuntimeStatus: snapshot.RuntimeStatus,
		EngineHealth:  snapshot.EngineHealth,
		ObservedAt:    snapshot.ObservedAt,
		PlayerStats:   stats,
	})
}

func (a *lobbyConsumerAdapter) OnRuntimeJobResult(ctx context.Context, gameID uuid.UUID, result runtime.JobResult) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.OnRuntimeJobResult(ctx, gameID, lobby.RuntimeJobResult{
		Op:      result.Op,
		Status:  result.Status,
		Message: result.Message,
	})
}

// userNotificationCascadeAdapter implements
// `user.NotificationCascade` by delegating to `*notification.Service`.
// Construction order: user.Service depends on the cascade and is
// built before notification.Service. The svc pointer is patched once
// notifSvc exists.
type userNotificationCascadeAdapter struct {
	svc *notification.Service
}

func (a *userNotificationCascadeAdapter) OnUserDeleted(ctx context.Context, userID uuid.UUID) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.OnUserDeleted(ctx, userID)
}

// lobbyNotificationPublisherAdapter implements
// `lobby.NotificationPublisher` by translating each LobbyNotification
// into a notification.Intent through the publisher Adapter exposed by
// notification.Service.
type lobbyNotificationPublisherAdapter struct {
	svc *notification.Service
}

func (a *lobbyNotificationPublisherAdapter) PublishLobbyEvent(ctx context.Context, ev lobby.LobbyNotification) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.LobbyAdapter().PublishLobbyEvent(ctx, ev)
}

// runtimeNotificationPublisherAdapter implements
// `runtime.NotificationPublisher` by delegating to the runtime adapter
// exposed by notification.Service.
type runtimeNotificationPublisherAdapter struct {
	svc *notification.Service
}

func (a *runtimeNotificationPublisherAdapter) PublishRuntimeEvent(ctx context.Context, kind, idempotencyKey string, payload map[string]any) error {
	if a == nil || a.svc == nil {
		return nil
	}
	return a.svc.RuntimeAdapter().PublishRuntimeEvent(ctx, kind, idempotencyKey, payload)
}