Files
Ilia Denisov 9f7c9099bc
Tests · Go / test (push) Successful in 1m59s
Tests · Go / test (pull_request) Successful in 2m1s
Tests · Integration / integration (pull_request) Successful in 1m37s
diplomail (Stage E): LibreTranslate client + async translation worker
Synchronous translation on read (Stage D) blocks the HTTP handler on
translator I/O. Stage E switches to "send moments-fast, deliver
when translated": recipients whose preferred_language differs from
the detected body_lang are inserted with available_at=NULL, and an
async worker makes them visible once a LibreTranslate call has
materialised the cache row (or has failed terminally after 5 retries).

Schema delta on diplomail_recipients: available_at,
translation_attempts, next_translation_attempt_at, plus a snapshot
recipient_preferred_language so the worker queries do not need a
join. Read paths (ListInbox, GetMessage, UnreadCount) filter on
available_at IS NOT NULL. Push fan-out is moved from Service to the
worker so the recipient only sees the toast when the inbox row is
actually visible.

Translator backend is now a configurable choice: empty
BACKEND_DIPLOMAIL_TRANSLATOR_URL → noop (deliver original);
populated → LibreTranslate HTTP client. Per-attempt timeout, max
attempts, and worker interval all live in DiplomailConfig. The HTTP
client itself is unit-tested via httptest (happy path, BCP47
normalisation, unsupported pair, 5xx, identical src/dst, missing
URL); worker delivery + fallback paths are covered by the
testcontainers-backed e2e tests in diplomail_e2e_test.go.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-15 20:15:28 +02:00

896 lines
30 KiB
Go

// Command backend boots the Galaxy backend process. It loads configuration,
// initialises telemetry and the structured logger, opens the Postgres pool,
// applies embedded migrations, and runs the HTTP, gRPC push, and (optional)
// Prometheus metrics listeners until SIGINT or SIGTERM triggers an orderly
// shutdown.
package main
import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"syscall"
"time"
// time/tzdata embeds the IANA timezone database so time.LoadLocation
// works in container images without /usr/share/zoneinfo (distroless
// static, alpine without the tzdata apk). The auth and user-settings
// flows validate the caller's `time_zone` via time.LoadLocation;
// without this import only "UTC" and fixed offsets would resolve.
_ "time/tzdata"
"galaxy/backend/internal/admin"
"galaxy/backend/internal/app"
"galaxy/backend/internal/auth"
"galaxy/backend/internal/config"
"galaxy/backend/internal/devsandbox"
"galaxy/backend/internal/diplomail"
"galaxy/backend/internal/diplomail/detector"
"galaxy/backend/internal/diplomail/translator"
"galaxy/backend/internal/dockerclient"
"galaxy/backend/internal/engineclient"
"galaxy/backend/internal/geo"
"galaxy/backend/internal/lobby"
"galaxy/backend/internal/logging"
"galaxy/backend/internal/mail"
"galaxy/backend/internal/metricsapi"
"galaxy/backend/internal/notification"
backendpostgres "galaxy/backend/internal/postgres"
"galaxy/backend/push"
"galaxy/backend/internal/runtime"
backendserver "galaxy/backend/internal/server"
"galaxy/backend/internal/telemetry"
"galaxy/backend/internal/user"
mobyclient "github.com/moby/moby/client"
"github.com/google/uuid"
"go.uber.org/zap"
)
// main installs signal-driven cancellation (SIGINT/SIGTERM) and hands
// control to run; any error from run is printed to stderr and turned
// into a non-zero exit status.
func main() {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()
	err := run(ctx)
	if err == nil {
		return
	}
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}
// run wires the entire backend object graph and blocks until the
// context is cancelled or a component fails. The named `err` result
// lets the deferred cleanups (logger sync, telemetry shutdown, pool
// close, geo drain) join their own failures onto whatever the body
// returned via errors.Join.
//
// Construction proceeds in dependency order: config → logger →
// telemetry → Postgres → core services → HTTP/gRPC servers. Cyclic
// service dependencies are broken with adapter structs declared up
// front and patched once both sides exist (see the per-adapter
// comments below run).
func run(ctx context.Context) (err error) {
	cfg, err := config.LoadFromEnv()
	if err != nil {
		return fmt.Errorf("load backend config: %w", err)
	}
	logger, err := logging.New(cfg.Logging)
	if err != nil {
		return fmt.Errorf("build backend logger: %w", err)
	}
	// Flush buffered log entries on exit without masking the primary error.
	defer func() {
		err = errors.Join(err, logging.Sync(logger))
	}()
	telemetryRT, err := telemetry.New(ctx, logger, cfg.Telemetry)
	if err != nil {
		return fmt.Errorf("build backend telemetry: %w", err)
	}
	// Telemetry shutdown gets a fresh context: the run ctx is already
	// cancelled by the time this defer fires, so it is bounded by
	// cfg.ShutdownTimeout instead.
	defer func() {
		shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
		defer shutdownCancel()
		err = errors.Join(err, telemetryRT.Shutdown(shutdownCtx))
	}()
	db, err := backendpostgres.Open(ctx, cfg.Postgres, telemetryRT)
	if err != nil {
		return fmt.Errorf("open backend postgres pool: %w", err)
	}
	defer func() {
		err = errors.Join(err, db.Close())
	}()
	if err := backendpostgres.ApplyMigrations(ctx, db); err != nil {
		return fmt.Errorf("apply backend migrations: %w", err)
	}
	pushSvc, err := push.NewService(push.ServiceConfig{FreshnessWindow: cfg.FreshnessWindow}, logger, telemetryRT)
	if err != nil {
		return fmt.Errorf("build backend push service: %w", err)
	}
	geoSvc, err := geo.NewService(cfg.GeoIP.DBPath, db)
	if err != nil {
		return fmt.Errorf("build backend geo service: %w", err)
	}
	geoSvc.SetLogger(logger)
	defer func() {
		// Drain pending counter goroutines first so their upserts run
		// against a still-open Postgres pool (defers run LIFO, so this
		// fires before db.Close above), then release the GeoLite2
		// resolver. Drain is bounded by cfg.ShutdownTimeout so a stuck
		// DB cannot indefinitely delay process exit.
		drainCtx, drainCancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
		defer drainCancel()
		geoSvc.Drain(drainCtx)
		err = errors.Join(err, geoSvc.Close())
	}()
	userStore := user.NewStore(db)
	userCache := user.NewCache()
	authStore := auth.NewStore(db)
	authCache := auth.NewCache()
	if err := authCache.Warm(ctx, authStore); err != nil {
		return fmt.Errorf("warm backend auth cache: %w", err)
	}
	logger.Info("auth cache warmed", zap.Int("active_sessions", authCache.Size()))
	// auth.Service depends on user.Service through SessionRevoker, but
	// user.Service depends on auth.Service through the lobby cascade
	// path. Each cyclic dependency is resolved with a tiny adapter
	// struct whose inner pointer is patched once both services exist.
	revoker := &authSessionRevoker{}
	lobbyCascade := &lobbyCascadeAdapter{}
	userNotifyCascade := &userNotificationCascadeAdapter{}
	lobbyNotifyPublisher := &lobbyNotificationPublisherAdapter{}
	lobbyDiplomailPublisher := &lobbyDiplomailPublisherAdapter{}
	runtimeNotifyPublisher := &runtimeNotificationPublisherAdapter{}
	userSvc := user.NewService(user.Deps{
		Store:              userStore,
		Cache:              userCache,
		Lobby:              lobbyCascade,
		Notification:       userNotifyCascade,
		Geo:                geoSvc,
		SessionRevoker:     revoker,
		UserNameMaxRetries: cfg.Auth.UserNameMaxRetries,
		Logger:             logger,
	})
	if err := userCache.Warm(ctx, userStore); err != nil {
		return fmt.Errorf("warm backend user entitlement cache: %w", err)
	}
	logger.Info("user entitlement cache warmed", zap.Int("snapshots", userCache.Size()))
	mailStore := mail.NewStore(db)
	mailSender, err := mail.NewSMTPSender(cfg.SMTP, logger)
	if err != nil {
		return fmt.Errorf("build mail smtp sender: %w", err)
	}
	mailSvc := mail.NewService(mail.Deps{
		Store:  mailStore,
		SMTP:   mailSender,
		Admin:  mail.NewNoopAdminNotifier(logger),
		Config: cfg.Mail,
		Logger: logger,
	})
	authSvc := auth.NewService(auth.Deps{
		Store:  authStore,
		Cache:  authCache,
		User:   userSvc,
		Geo:    geoSvc,
		Mail:   mailSvc,
		Push:   pushSvc,
		Config: cfg.Auth,
		Logger: logger,
	})
	// Patch the auth→user cycle now that both services exist.
	revoker.svc = authSvc
	adminStore := admin.NewStore(db)
	adminCache := admin.NewCache()
	if err := admin.Bootstrap(ctx, adminStore, cfg.Admin, logger); err != nil {
		return fmt.Errorf("admin bootstrap: %w", err)
	}
	adminSvc := admin.NewService(admin.Deps{
		Store:  adminStore,
		Cache:  adminCache,
		Logger: logger,
	})
	if err := adminCache.Warm(ctx, adminStore); err != nil {
		return fmt.Errorf("warm backend admin cache: %w", err)
	}
	logger.Info("admin cache warmed", zap.Int("admins", adminCache.Size()))
	runtimeGateway := &runtimeGatewayAdapter{}
	lobbyStore := lobby.NewStore(db)
	lobbyCache := lobby.NewCache()
	lobbySvc, err := lobby.NewService(lobby.Deps{
		Store:        lobbyStore,
		Cache:        lobbyCache,
		Runtime:      runtimeGateway,
		Notification: lobbyNotifyPublisher,
		Diplomail:    lobbyDiplomailPublisher,
		Entitlement:  &userEntitlementAdapter{svc: userSvc},
		Config:       cfg.Lobby,
		Logger:       logger,
	})
	if err != nil {
		return fmt.Errorf("build backend lobby service: %w", err)
	}
	if err := lobbyCache.Warm(ctx, lobbyStore); err != nil {
		return fmt.Errorf("warm backend lobby cache: %w", err)
	}
	games, members, raceNames := lobbyCache.Sizes()
	logger.Info("lobby cache warmed",
		zap.Int("games", games),
		zap.Int("memberships", members),
		zap.Int("race_names", raceNames),
	)
	// Patch the user→lobby cascade now that lobbySvc exists.
	lobbyCascade.svc = lobbySvc
	dockerCli, err := mobyclient.New(mobyclient.WithHost(cfg.Docker.Host))
	if err != nil {
		return fmt.Errorf("build docker client: %w", err)
	}
	dockerAdapter, err := dockerclient.NewAdapter(dockerclient.AdapterConfig{Docker: dockerCli})
	if err != nil {
		return fmt.Errorf("build docker adapter: %w", err)
	}
	if err := dockerAdapter.EnsureNetwork(ctx, cfg.Docker.Network); err != nil {
		return fmt.Errorf("docker network %q: %w", cfg.Docker.Network, err)
	}
	engineCli, err := engineclient.NewClient(engineclient.Config{
		CallTimeout:  cfg.Engine.CallTimeout,
		ProbeTimeout: cfg.Engine.ProbeTimeout,
	})
	if err != nil {
		return fmt.Errorf("build engine client: %w", err)
	}
	defer func() {
		err = errors.Join(err, engineCli.Close())
	}()
	runtimeStore := runtime.NewStore(db)
	runtimeCache := runtime.NewCache()
	engineVersionSvc := runtime.NewEngineVersionService(runtimeStore, runtimeCache, nil)
	runtimeSvc, err := runtime.NewService(runtime.Deps{
		Store:          runtimeStore,
		Cache:          runtimeCache,
		EngineVersions: engineVersionSvc,
		Docker:         dockerAdapter,
		Engine:         engineCli,
		Lobby:          &lobbyConsumerAdapter{svc: lobbySvc},
		Notification:   runtimeNotifyPublisher,
		DockerNetwork:  cfg.Docker.Network,
		HostStateRoot:  cfg.Game.StateRoot,
		Config:         cfg.Runtime,
		Logger:         logger,
	})
	if err != nil {
		return fmt.Errorf("build runtime service: %w", err)
	}
	if err := runtimeCache.Warm(ctx, runtimeStore); err != nil {
		return fmt.Errorf("warm backend runtime cache: %w", err)
	}
	rtRecords, rtVersions := runtimeCache.Sizes()
	logger.Info("runtime cache warmed",
		zap.Int("active_runtimes", rtRecords),
		zap.Int("engine_versions", rtVersions),
	)
	// Patch the lobby→runtime gateway now that runtimeSvc exists.
	runtimeGateway.svc = runtimeSvc
	// Run a single reconciliation pass before the dev-sandbox
	// bootstrap so any runtime row pointing at a vanished engine
	// container (host reboot wiped /tmp/galaxy-game-state/<uuid>;
	// `tools/local-dev`'s `prune-broken-engines` target reaped the
	// husk) is already cascaded through `markRemoved` → lobby
	// `cancelled` by the time the bootstrap walks the sandbox list.
	// Without this pre-tick the bootstrap would reuse the
	// soon-to-be-cancelled game and force the developer into a
	// second `make up` cycle to land a healthy sandbox. Failures are
	// non-fatal: the periodic ticker started later catches up, and
	// the worst case degrades to the legacy two-cycle recovery.
	if err := runtimeSvc.Reconciler().Tick(ctx); err != nil {
		logger.Warn("pre-bootstrap reconciler tick failed", zap.Error(err))
	}
	if err := devsandbox.Bootstrap(ctx, devsandbox.Deps{
		Users:          userSvc,
		Lobby:          lobbySvc,
		EngineVersions: engineVersionSvc,
	}, cfg.DevSandbox, logger); err != nil {
		return fmt.Errorf("dev sandbox bootstrap: %w", err)
	}
	notifStore := notification.NewStore(db)
	notifSvc := notification.NewService(notification.Deps{
		Store:    notifStore,
		Mail:     mailSvc,
		Push:     pushSvc,
		Accounts: userSvc,
		Config:   cfg.Notification,
		Logger:   logger,
	})
	// Patch the three notification-publisher adapters declared above.
	userNotifyCascade.svc = notifSvc
	lobbyNotifyPublisher.svc = notifSvc
	runtimeNotifyPublisher.svc = notifSvc
	diplomailStore := diplomail.NewStore(db)
	diplomailTranslator, err := buildDiplomailTranslator(cfg.Diplomail, logger)
	if err != nil {
		return fmt.Errorf("build diplomail translator: %w", err)
	}
	diplomailSvc := diplomail.NewService(diplomail.Deps{
		Store:        diplomailStore,
		Memberships:  &diplomailMembershipAdapter{lobby: lobbySvc, users: userSvc},
		Notification: &diplomailNotificationPublisherAdapter{svc: notifSvc},
		Entitlements: &diplomailEntitlementAdapter{users: userSvc},
		Games:        &diplomailGameAdapter{lobby: lobbySvc},
		Detector:     detector.New(),
		Translator:   diplomailTranslator,
		Config:       cfg.Diplomail,
		Logger:       logger,
	})
	lobbyDiplomailPublisher.svc = diplomailSvc
	diplomailWorker := diplomail.NewWorker(diplomailSvc)
	if email := cfg.Notification.AdminEmail; email == "" {
		logger.Info("notification admin email not configured (BACKEND_NOTIFICATION_ADMIN_EMAIL); admin-channel routes will be skipped")
	} else {
		logger.Info("notification admin email configured", zap.String("admin_email", email))
	}
	// HTTP handler construction: one handler bundle per route group,
	// all fed into the router below.
	publicAuthHandlers := backendserver.NewPublicAuthHandlers(authSvc, logger)
	internalSessionsHandlers := backendserver.NewInternalSessionsHandlers(authSvc, logger)
	userSessionsHandlers := backendserver.NewUserSessionsHandlers(authSvc, logger)
	userAccountHandlers := backendserver.NewUserAccountHandlers(userSvc, logger)
	adminUsersHandlers := backendserver.NewAdminUsersHandlers(userSvc, logger)
	adminAdminAccountsHandlers := backendserver.NewAdminAdminAccountsHandlers(adminSvc, logger)
	internalUsersHandlers := backendserver.NewInternalUsersHandlers(userSvc, logger)
	userLobbyGamesHandlers := backendserver.NewUserLobbyGamesHandlers(lobbySvc, logger)
	userLobbyApplicationsHandlers := backendserver.NewUserLobbyApplicationsHandlers(lobbySvc, logger)
	userLobbyInvitesHandlers := backendserver.NewUserLobbyInvitesHandlers(lobbySvc, logger)
	userLobbyMembershipsHandlers := backendserver.NewUserLobbyMembershipsHandlers(lobbySvc, logger)
	userLobbyMyHandlers := backendserver.NewUserLobbyMyHandlers(lobbySvc, logger)
	userLobbyRaceNamesHandlers := backendserver.NewUserLobbyRaceNamesHandlers(lobbySvc, logger)
	adminGamesHandlers := backendserver.NewAdminGamesHandlers(lobbySvc, logger)
	adminEngineVersionsHandlers := backendserver.NewAdminEngineVersionsHandlers(engineVersionSvc, logger)
	adminRuntimesHandlers := backendserver.NewAdminRuntimesHandlers(runtimeSvc, logger)
	adminMailHandlers := backendserver.NewAdminMailHandlers(mailSvc, logger)
	adminDiplomailHandlers := backendserver.NewAdminDiplomailHandlers(diplomailSvc, logger)
	adminNotificationsHandlers := backendserver.NewAdminNotificationsHandlers(notifSvc, logger)
	adminGeoHandlers := backendserver.NewAdminGeoHandlers(geoSvc, logger)
	userGamesHandlers := backendserver.NewUserGamesHandlers(runtimeSvc, engineCli, logger)
	userMailHandlers := backendserver.NewUserMailHandlers(diplomailSvc, lobbySvc, userSvc, logger)
	// Readiness gate: the process only reports ready once every cache
	// has finished its warm-up pass.
	ready := func() bool {
		return authCache.Ready() && userCache.Ready() && adminCache.Ready() && lobbyCache.Ready() && runtimeCache.Ready()
	}
	handler, err := backendserver.NewRouter(backendserver.RouterDependencies{
		Logger:                logger,
		Telemetry:             telemetryRT,
		Ready:                 ready,
		AdminVerifier:         adminSvc,
		GeoCounter:            geoSvc,
		PublicAuth:            publicAuthHandlers,
		InternalSessions:      internalSessionsHandlers,
		UserSessions:          userSessionsHandlers,
		UserAccount:           userAccountHandlers,
		AdminUsers:            adminUsersHandlers,
		AdminAdminAccounts:    adminAdminAccountsHandlers,
		InternalUsers:         internalUsersHandlers,
		UserLobbyGames:        userLobbyGamesHandlers,
		UserLobbyApplications: userLobbyApplicationsHandlers,
		UserLobbyInvites:      userLobbyInvitesHandlers,
		UserLobbyMemberships:  userLobbyMembershipsHandlers,
		UserLobbyMy:           userLobbyMyHandlers,
		UserLobbyRaceNames:    userLobbyRaceNamesHandlers,
		AdminGames:            adminGamesHandlers,
		AdminRuntimes:         adminRuntimesHandlers,
		AdminEngineVersions:   adminEngineVersionsHandlers,
		AdminMail:             adminMailHandlers,
		AdminDiplomail:        adminDiplomailHandlers,
		AdminNotifications:    adminNotificationsHandlers,
		AdminGeo:              adminGeoHandlers,
		UserGames:             userGamesHandlers,
		UserMail:              userMailHandlers,
	})
	if err != nil {
		return fmt.Errorf("build backend router: %w", err)
	}
	httpServer := backendserver.NewServer(cfg.HTTP, handler, logger)
	pushServer := push.NewServer(cfg.GRPCPush, pushSvc, logger, telemetryRT)
	metricsServer := metricsapi.NewServer(telemetryRT.PrometheusListenAddr(), telemetryRT.Handler(), logger)
	lobbySweeper := lobby.NewSweeper(lobbySvc)
	mailWorker := mail.NewWorker(mailSvc)
	notifWorker := notification.NewWorker(notifSvc)
	runtimeWorkers := runtimeSvc.Workers()
	runtimeScheduler := runtimeSvc.SchedulerComponent()
	runtimeReconciler := runtimeSvc.Reconciler()
	components := []app.Component{httpServer, pushServer, mailWorker, notifWorker, diplomailWorker, lobbySweeper, runtimeWorkers, runtimeScheduler, runtimeReconciler}
	// The Prometheus listener is optional; it only joins the component
	// set when a listen address was configured.
	if metricsServer.Enabled() {
		components = append(components, metricsServer)
	}
	logger.Info("backend application starting",
		zap.String("http_addr", cfg.HTTP.Addr),
		zap.String("grpc_push_addr", cfg.GRPCPush.Addr),
		zap.String("traces_exporter", cfg.Telemetry.TracesExporter),
		zap.String("metrics_exporter", cfg.Telemetry.MetricsExporter),
		zap.String("prometheus_addr", telemetryRT.PrometheusListenAddr()),
	)
	// app.New runs all components until ctx cancellation, then shuts
	// them down within cfg.ShutdownTimeout.
	return app.New(cfg.ShutdownTimeout, components...).Run(ctx)
}
// authSessionRevoker adapts `*auth.Service.RevokeAllForUser` to the
// `user.SessionRevoker` interface (which returns only an error, while
// auth's method also returns the slice of revoked sessions). The svc
// field is patched by the caller after both services have been
// constructed — auth.Service depends on user.Service through
// `UserEnsurer`, while user.Service depends on auth.Service through
// `SessionRevoker`. Wiring the adapter struct first and patching the
// pointer afterwards breaks the cycle without introducing a third
// package.
type authSessionRevoker struct {
	svc *auth.Service
}

// RevokeAllForUser translates the user-package actor into auth's
// RevokeContext and drops the returned session slice. An unpatched
// adapter is a silent no-op.
func (r *authSessionRevoker) RevokeAllForUser(ctx context.Context, userID uuid.UUID, actor user.SessionRevokeActor) error {
	if r == nil || r.svc == nil {
		return nil
	}
	revokeCtx := auth.RevokeContext{
		ActorKind: auth.ActorKind(actor.Kind),
		ActorID:   actor.ID,
		Reason:    actor.Reason,
	}
	_, err := r.svc.RevokeAllForUser(ctx, userID, revokeCtx)
	return err
}
// lobbyCascadeAdapter adapts `*lobby.Service` to the
// `user.LobbyCascade` interface. The svc field is patched after both
// services have been constructed — same dependency-cycle pattern as
// authSessionRevoker.
type lobbyCascadeAdapter struct {
	svc *lobby.Service
}

// OnUserBlocked forwards the block cascade to the lobby service; an
// unpatched or nil adapter is a silent no-op.
func (a *lobbyCascadeAdapter) OnUserBlocked(ctx context.Context, userID uuid.UUID) error {
	if a != nil && a.svc != nil {
		return a.svc.OnUserBlocked(ctx, userID)
	}
	return nil
}

// OnUserDeleted forwards the delete cascade to the lobby service; an
// unpatched or nil adapter is a silent no-op.
func (a *lobbyCascadeAdapter) OnUserDeleted(ctx context.Context, userID uuid.UUID) error {
	if a != nil && a.svc != nil {
		return a.svc.OnUserDeleted(ctx, userID)
	}
	return nil
}
// userEntitlementAdapter adapts `*user.Service.GetEntitlementSnapshot`
// to the `lobby.EntitlementProvider` interface. Lobby reads the
// `MaxRegisteredRaceNames` field at race-name registration time to
// enforce the per-tier quota.
type userEntitlementAdapter struct {
	svc *user.Service
}

// GetMaxRegisteredRaceNames returns the per-user race-name quota from
// the entitlement snapshot.
func (a *userEntitlementAdapter) GetMaxRegisteredRaceNames(ctx context.Context, userID uuid.UUID) (int32, error) {
	if a == nil || a.svc == nil {
		return 1, nil // unpatched adapter: quota of one
	}
	snapshot, err := a.svc.GetEntitlementSnapshot(ctx, userID)
	if err != nil {
		return 0, err
	}
	return snapshot.MaxRegisteredRaceNames, nil
}
// runtimeGatewayAdapter implements `lobby.RuntimeGateway` by
// delegating to `*runtime.Service`. The svc pointer is patched after
// the services are constructed — runtime depends on lobby
// (LobbyConsumer), so we wire the adapter first and patch it once
// runtimeSvc exists.
type runtimeGatewayAdapter struct {
	svc *runtime.Service
}

// StartGame delegates to the runtime service; an unpatched adapter is
// a silent no-op.
func (a *runtimeGatewayAdapter) StartGame(ctx context.Context, gameID uuid.UUID) error {
	if a != nil && a.svc != nil {
		return a.svc.StartGame(ctx, gameID)
	}
	return nil
}

// StopGame delegates to the runtime service; an unpatched adapter is
// a silent no-op.
func (a *runtimeGatewayAdapter) StopGame(ctx context.Context, gameID uuid.UUID) error {
	if a != nil && a.svc != nil {
		return a.svc.StopGame(ctx, gameID)
	}
	return nil
}

// PauseGame delegates to the runtime service; an unpatched adapter is
// a silent no-op.
func (a *runtimeGatewayAdapter) PauseGame(ctx context.Context, gameID uuid.UUID) error {
	if a != nil && a.svc != nil {
		return a.svc.PauseGame(ctx, gameID)
	}
	return nil
}

// ResumeGame delegates to the runtime service; an unpatched adapter is
// a silent no-op.
func (a *runtimeGatewayAdapter) ResumeGame(ctx context.Context, gameID uuid.UUID) error {
	if a != nil && a.svc != nil {
		return a.svc.ResumeGame(ctx, gameID)
	}
	return nil
}
// lobbyConsumerAdapter implements `runtime.LobbyConsumer` by
// translating runtime DTOs into the lobby package's vocabulary.
type lobbyConsumerAdapter struct {
	svc *lobby.Service
}

// OnRuntimeSnapshot converts a runtime snapshot (including per-player
// turn stats) into the lobby vocabulary and forwards it.
func (a *lobbyConsumerAdapter) OnRuntimeSnapshot(ctx context.Context, gameID uuid.UUID, snapshot runtime.LobbySnapshot) error {
	if a == nil || a.svc == nil {
		return nil
	}
	converted := make([]lobby.PlayerTurnStats, 0, len(snapshot.PlayerStats))
	for i := range snapshot.PlayerStats {
		src := &snapshot.PlayerStats[i]
		converted = append(converted, lobby.PlayerTurnStats{
			UserID:            src.UserID,
			InitialPlanets:    src.InitialPlanets,
			InitialPopulation: src.InitialPopulation,
			CurrentPlanets:    src.CurrentPlanets,
			CurrentPopulation: src.CurrentPopulation,
			MaxPlanets:        src.MaxPlanets,
			MaxPopulation:     src.MaxPopulation,
		})
	}
	translated := lobby.RuntimeSnapshot{
		CurrentTurn:   snapshot.CurrentTurn,
		RuntimeStatus: snapshot.RuntimeStatus,
		EngineHealth:  snapshot.EngineHealth,
		ObservedAt:    snapshot.ObservedAt,
		PlayerStats:   converted,
	}
	return a.svc.OnRuntimeSnapshot(ctx, gameID, translated)
}

// OnRuntimeJobResult converts a runtime job outcome into the lobby
// vocabulary and forwards it.
func (a *lobbyConsumerAdapter) OnRuntimeJobResult(ctx context.Context, gameID uuid.UUID, result runtime.JobResult) error {
	if a == nil || a.svc == nil {
		return nil
	}
	translated := lobby.RuntimeJobResult{
		Op:      result.Op,
		Status:  result.Status,
		Message: result.Message,
	}
	return a.svc.OnRuntimeJobResult(ctx, gameID, translated)
}
// userNotificationCascadeAdapter implements
// `user.NotificationCascade` by delegating to `*notification.Service`.
// Construction order: user.Service depends on the cascade and is
// built before notification.Service. The svc pointer is patched once
// notifSvc exists.
type userNotificationCascadeAdapter struct {
	svc *notification.Service
}

// OnUserDeleted forwards the delete cascade to the notification
// service; an unpatched adapter is a silent no-op.
func (a *userNotificationCascadeAdapter) OnUserDeleted(ctx context.Context, userID uuid.UUID) error {
	if a != nil && a.svc != nil {
		return a.svc.OnUserDeleted(ctx, userID)
	}
	return nil
}
// lobbyNotificationPublisherAdapter implements
// `lobby.NotificationPublisher` by translating each LobbyNotification
// into a notification.Intent through the publisher Adapter exposed by
// notification.Service.
type lobbyNotificationPublisherAdapter struct {
	svc *notification.Service
}

// PublishLobbyEvent routes the event through the lobby-specific
// adapter of the notification service; an unpatched adapter is a
// silent no-op.
func (a *lobbyNotificationPublisherAdapter) PublishLobbyEvent(ctx context.Context, ev lobby.LobbyNotification) error {
	if a != nil && a.svc != nil {
		return a.svc.LobbyAdapter().PublishLobbyEvent(ctx, ev)
	}
	return nil
}
// runtimeNotificationPublisherAdapter implements
// `runtime.NotificationPublisher` by delegating to the runtime adapter
// exposed by notification.Service.
type runtimeNotificationPublisherAdapter struct {
	svc *notification.Service
}

// PublishRuntimeEvent routes the event through the runtime-specific
// adapter of the notification service; an unpatched adapter is a
// silent no-op.
func (a *runtimeNotificationPublisherAdapter) PublishRuntimeEvent(ctx context.Context, kind, idempotencyKey string, payload map[string]any) error {
	if a != nil && a.svc != nil {
		return a.svc.RuntimeAdapter().PublishRuntimeEvent(ctx, kind, idempotencyKey, payload)
	}
	return nil
}
// diplomailMembershipAdapter implements `diplomail.MembershipLookup`
// by walking the lobby cache (for active rows) and the lobby service
// (for any-status rows) and stitching each membership row to the
// immutable `accounts.user_name` resolved through `*user.Service`.
type diplomailMembershipAdapter struct {
	lobby *lobby.Service
	users *user.Service
}

// GetActiveMembership resolves the game/user pair from the lobby
// cache and stitches in the account's user_name and preferred
// language. Missing dependencies, an unknown game, or an absent
// membership row all surface as diplomail.ErrNotFound.
func (a *diplomailMembershipAdapter) GetActiveMembership(ctx context.Context, gameID, userID uuid.UUID) (diplomail.ActiveMembership, error) {
	var none diplomail.ActiveMembership
	if a == nil || a.lobby == nil || a.users == nil {
		return none, diplomail.ErrNotFound
	}
	cache := a.lobby.Cache()
	if cache == nil {
		return none, diplomail.ErrNotFound
	}
	game, ok := cache.GetGame(gameID)
	if !ok {
		return none, diplomail.ErrNotFound
	}
	var membership *lobby.Membership
	for _, row := range cache.MembershipsForGame(gameID) {
		if row.UserID == userID {
			row := row // detach from the loop variable before taking the address
			membership = &row
			break
		}
	}
	if membership == nil {
		return none, diplomail.ErrNotFound
	}
	account, err := a.users.GetAccount(ctx, userID)
	if err != nil {
		return none, err
	}
	return diplomail.ActiveMembership{
		UserID:            userID,
		GameID:            gameID,
		GameName:          game.GameName,
		UserName:          account.UserName,
		RaceName:          membership.RaceName,
		PreferredLanguage: account.PreferredLanguage,
	}, nil
}
// GetMembershipAnyStatus resolves a membership row of any status for
// the given game/user pair, stitched to the account's user_name and
// preferred language. The game is looked up in the lobby cache while
// the membership list comes from the service (so removed/left rows
// are visible). Missing dependencies, an unknown game, or an absent
// row surface as diplomail.ErrNotFound.
func (a *diplomailMembershipAdapter) GetMembershipAnyStatus(ctx context.Context, gameID, userID uuid.UUID) (diplomail.MemberSnapshot, error) {
	if a == nil || a.lobby == nil || a.users == nil {
		return diplomail.MemberSnapshot{}, diplomail.ErrNotFound
	}
	// Guard a nil cache before dereferencing, mirroring
	// GetActiveMembership; previously Cache() was used unchecked.
	cache := a.lobby.Cache()
	if cache == nil {
		return diplomail.MemberSnapshot{}, diplomail.ErrNotFound
	}
	game, ok := cache.GetGame(gameID)
	if !ok {
		return diplomail.MemberSnapshot{}, diplomail.ErrNotFound
	}
	members, err := a.lobby.ListMembershipsForGame(ctx, gameID)
	if err != nil {
		return diplomail.MemberSnapshot{}, err
	}
	var found *lobby.Membership
	for _, m := range members {
		if m.UserID == userID {
			mm := m // detach from the loop variable before taking the address
			found = &mm
			break
		}
	}
	if found == nil {
		return diplomail.MemberSnapshot{}, diplomail.ErrNotFound
	}
	account, err := a.users.GetAccount(ctx, userID)
	if err != nil {
		return diplomail.MemberSnapshot{}, err
	}
	return diplomail.MemberSnapshot{
		UserID:            userID,
		GameID:            gameID,
		GameName:          game.GameName,
		UserName:          account.UserName,
		RaceName:          found.RaceName,
		Status:            found.Status,
		PreferredLanguage: account.PreferredLanguage,
	}, nil
}
// ListMembers returns the membership snapshots of a game filtered by
// the diplomail recipient scope: active only, active+removed, or all
// members (unknown scopes fall back to active only). Each row is
// stitched to the account's user_name and preferred language; a
// failed account lookup aborts the whole listing.
func (a *diplomailMembershipAdapter) ListMembers(ctx context.Context, gameID uuid.UUID, scope string) ([]diplomail.MemberSnapshot, error) {
	if a == nil || a.lobby == nil || a.users == nil {
		return nil, diplomail.ErrNotFound
	}
	// Guard a nil cache before dereferencing, mirroring
	// GetActiveMembership; previously Cache() was used unchecked.
	cache := a.lobby.Cache()
	if cache == nil {
		return nil, diplomail.ErrNotFound
	}
	game, ok := cache.GetGame(gameID)
	if !ok {
		return nil, diplomail.ErrNotFound
	}
	members, err := a.lobby.ListMembershipsForGame(ctx, gameID)
	if err != nil {
		return nil, err
	}
	// matches maps the diplomail scope onto lobby membership statuses.
	matches := func(status string) bool {
		switch scope {
		case diplomail.RecipientScopeActive:
			return status == lobby.MembershipStatusActive
		case diplomail.RecipientScopeActiveAndRemoved:
			return status == lobby.MembershipStatusActive || status == lobby.MembershipStatusRemoved
		case diplomail.RecipientScopeAllMembers:
			return true
		default:
			return status == lobby.MembershipStatusActive
		}
	}
	out := make([]diplomail.MemberSnapshot, 0, len(members))
	for _, m := range members {
		if !matches(m.Status) {
			continue
		}
		account, err := a.users.GetAccount(ctx, m.UserID)
		if err != nil {
			return nil, fmt.Errorf("resolve user_name for %s: %w", m.UserID, err)
		}
		out = append(out, diplomail.MemberSnapshot{
			UserID:            m.UserID,
			GameID:            gameID,
			GameName:          game.GameName,
			UserName:          account.UserName,
			RaceName:          m.RaceName,
			Status:            m.Status,
			PreferredLanguage: account.PreferredLanguage,
		})
	}
	return out, nil
}
// lobbyDiplomailPublisherAdapter implements `lobby.DiplomailPublisher`
// by translating each lobby.LifecycleEvent into the diplomail
// vocabulary and delegating to `*diplomail.Service.PublishLifecycle`.
// The svc pointer is patched once diplomailSvc exists — diplomail
// depends on lobby through MembershipLookup, so the lobby service
// is constructed first and patched up.
type lobbyDiplomailPublisherAdapter struct {
	svc *diplomail.Service
}

// PublishLifecycle converts the lobby event field-for-field into a
// diplomail.LifecycleEvent and forwards it; an unpatched adapter is a
// silent no-op.
func (a *lobbyDiplomailPublisherAdapter) PublishLifecycle(ctx context.Context, ev lobby.LifecycleEvent) error {
	if a == nil || a.svc == nil {
		return nil
	}
	translated := diplomail.LifecycleEvent{
		GameID:     ev.GameID,
		Kind:       ev.Kind,
		Actor:      ev.Actor,
		Reason:     ev.Reason,
		TargetUser: ev.TargetUser,
	}
	return a.svc.PublishLifecycle(ctx, translated)
}
// buildDiplomailTranslator selects the diplomail translator backend
// from configuration: a non-empty `TranslatorURL` constructs the
// LibreTranslate HTTP client; an empty URL falls through to the
// noop translator so deployments without a translation service still
// boot and deliver mail with the fallback path.
func buildDiplomailTranslator(cfg config.DiplomailConfig, logger *zap.Logger) (translator.Translator, error) {
	if url := cfg.TranslatorURL; url != "" {
		return translator.NewLibreTranslate(translator.LibreTranslateConfig{
			URL:     url,
			Timeout: cfg.TranslatorTimeout,
		})
	}
	logger.Info("diplomail translator URL not configured, using noop translator")
	return translator.NewNoop(), nil
}
// diplomailEntitlementAdapter implements
// `diplomail.EntitlementReader` by reading the user-service
// entitlement snapshot. The IsPaid flag mirrors the per-tier policy
// defined in `internal/user`, so updates to the tier set (monthly,
// yearly, permanent, …) flow through without changes here.
type diplomailEntitlementAdapter struct {
	users *user.Service
}

// IsPaidTier reports whether the user's entitlement snapshot marks
// them as paid; an unpatched adapter answers false.
func (a *diplomailEntitlementAdapter) IsPaidTier(ctx context.Context, userID uuid.UUID) (bool, error) {
	if a == nil || a.users == nil {
		return false, nil
	}
	snapshot, err := a.users.GetEntitlementSnapshot(ctx, userID)
	if err != nil {
		return false, err
	}
	return snapshot.IsPaid, nil
}
// diplomailGameAdapter implements `diplomail.GameLookup`. The
// running-games and finished-games queries walk the lobby cache so
// the admin multi-game broadcast and bulk-purge endpoints do not
// fan out a per-game DB query each time. GetGame falls back to the
// cache; an unknown id is surfaced as ErrNotFound (the diplomail
// sentinel).
type diplomailGameAdapter struct {
	lobby *lobby.Service
}

// ListRunningGames walks the lobby cache and returns a snapshot of
// every game in a "running-ish" lifecycle state (see isRunningStatus).
func (a *diplomailGameAdapter) ListRunningGames(_ context.Context) ([]diplomail.GameSnapshot, error) {
	if a == nil || a.lobby == nil || a.lobby.Cache() == nil {
		return nil, nil
	}
	var snapshots []diplomail.GameSnapshot
	for _, g := range a.lobby.Cache().ListGames() {
		if isRunningStatus(g.Status) {
			snapshots = append(snapshots, gameSnapshot(g))
		}
	}
	return snapshots, nil
}
// ListFinishedGamesBefore queries the lobby service for games that
// finished before the cutoff and converts each to a diplomail
// snapshot; a nil adapter yields an empty result.
func (a *diplomailGameAdapter) ListFinishedGamesBefore(ctx context.Context, cutoff time.Time) ([]diplomail.GameSnapshot, error) {
	if a == nil || a.lobby == nil {
		return nil, nil
	}
	finished, err := a.lobby.ListFinishedGamesBefore(ctx, cutoff)
	if err != nil {
		return nil, err
	}
	snapshots := make([]diplomail.GameSnapshot, 0, len(finished))
	for _, g := range finished {
		snapshots = append(snapshots, gameSnapshot(g))
	}
	return snapshots, nil
}
// GetGame resolves a single game snapshot from the lobby cache; any
// missing dependency or unknown id maps to diplomail.ErrNotFound.
func (a *diplomailGameAdapter) GetGame(_ context.Context, gameID uuid.UUID) (diplomail.GameSnapshot, error) {
	var none diplomail.GameSnapshot
	if a == nil || a.lobby == nil {
		return none, diplomail.ErrNotFound
	}
	cache := a.lobby.Cache()
	if cache == nil {
		return none, diplomail.ErrNotFound
	}
	game, ok := cache.GetGame(gameID)
	if !ok {
		return none, diplomail.ErrNotFound
	}
	return gameSnapshot(game), nil
}
// gameSnapshot converts a lobby game record into the diplomail
// snapshot shape, deep-copying the FinishedAt pointer so the snapshot
// does not alias the cached record.
func gameSnapshot(g lobby.GameRecord) diplomail.GameSnapshot {
	snap := diplomail.GameSnapshot{
		GameID:   g.GameID,
		GameName: g.GameName,
		Status:   g.Status,
	}
	if g.FinishedAt != nil {
		finished := *g.FinishedAt
		snap.FinishedAt = &finished
	}
	return snap
}
// isRunningStatus reports whether a lobby game status counts as
// "running" for diplomail purposes: ready-to-start, starting, running,
// and paused all qualify; every other status does not.
func isRunningStatus(status string) bool {
	return status == lobby.GameStatusReadyToStart ||
		status == lobby.GameStatusStarting ||
		status == lobby.GameStatusRunning ||
		status == lobby.GameStatusPaused
}
// diplomailNotificationPublisherAdapter implements
// `diplomail.NotificationPublisher` by translating each
// DiplomailNotification into a notification.Intent and routing it
// through `*notification.Service.Submit`. The publisher leaves the
// `diplomail.message.received` catalog entry to handle channel
// fan-out (push only in Stage A).
type diplomailNotificationPublisherAdapter struct {
	svc *notification.Service
}

// PublishDiplomailEvent wraps the event into a single-recipient
// notification intent and submits it, discarding the submission
// receipt; an unpatched adapter is a silent no-op.
func (a *diplomailNotificationPublisherAdapter) PublishDiplomailEvent(ctx context.Context, ev diplomail.DiplomailNotification) error {
	if a == nil || a.svc == nil {
		return nil
	}
	_, err := a.svc.Submit(ctx, notification.Intent{
		Kind:           ev.Kind,
		IdempotencyKey: ev.IdempotencyKey,
		Recipients:     []uuid.UUID{ev.Recipient},
		Payload:        ev.Payload,
	})
	return err
}