Files
galaxy-game/gamemaster/internal/app/wiring.go
T
2026-05-03 07:59:03 +02:00

480 lines
14 KiB
Go

package app
import (
"database/sql"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/gamemaster/internal/adapters/engineclient"
"galaxy/gamemaster/internal/adapters/lobbyclient"
"galaxy/gamemaster/internal/adapters/lobbyeventspublisher"
"galaxy/gamemaster/internal/adapters/notificationpublisher"
"galaxy/gamemaster/internal/adapters/postgres/engineversionstore"
"galaxy/gamemaster/internal/adapters/postgres/operationlog"
"galaxy/gamemaster/internal/adapters/postgres/playermappingstore"
"galaxy/gamemaster/internal/adapters/postgres/runtimerecordstore"
"galaxy/gamemaster/internal/adapters/redisstate/streamoffsets"
"galaxy/gamemaster/internal/adapters/rtmclient"
"galaxy/gamemaster/internal/config"
"galaxy/gamemaster/internal/service/adminbanish"
"galaxy/gamemaster/internal/service/adminforce"
"galaxy/gamemaster/internal/service/adminpatch"
"galaxy/gamemaster/internal/service/adminstop"
"galaxy/gamemaster/internal/service/commandexecute"
engineversionsvc "galaxy/gamemaster/internal/service/engineversion"
"galaxy/gamemaster/internal/service/livenessreply"
"galaxy/gamemaster/internal/service/membership"
"galaxy/gamemaster/internal/service/orderput"
"galaxy/gamemaster/internal/service/registerruntime"
"galaxy/gamemaster/internal/service/reportget"
"galaxy/gamemaster/internal/service/scheduler"
"galaxy/gamemaster/internal/service/turngeneration"
"galaxy/gamemaster/internal/telemetry"
"galaxy/gamemaster/internal/worker/healtheventsconsumer"
"galaxy/gamemaster/internal/worker/schedulerticker"
"github.com/redis/go-redis/v9"
)
// wiring owns the process-level singletons constructed once during
// `NewRuntime` and consumed by every worker and HTTP handler. Stage
// 19 grew the struct to hold every store, adapter, service and
// worker required by the listener and the long-lived components.
//
// All fields are populated by newWiring and are non-nil afterwards;
// nothing mutates them post-construction, so reads are safe from any
// goroutine.
type wiring struct {
	cfg         config.Config
	redisClient *redis.Client
	pgPool      *sql.DB
	// clock is injected for deterministic tests; defaults to time.Now.
	clock     func() time.Time
	logger    *slog.Logger
	telemetry *telemetry.Runtime
	// Stores. Backed by the shared pgPool / redisClient above; their
	// connection lifecycles are owned by the runtime, not the wiring.
	runtimeRecords *runtimerecordstore.Store
	engineVersions *engineversionstore.Store
	playerMappings *playermappingstore.Store
	operationLogs  *operationlog.Store
	streamOffsets  *streamoffsets.Store
	// External adapters. HTTP clients register Close hooks in closers;
	// the Redis Stream publishers do not.
	engineClient          *engineclient.Client
	lobbyClient           *lobbyclient.Client
	rtmClient             *rtmclient.Client
	notificationPublisher *notificationpublisher.Publisher
	lobbyEventsPublisher  *lobbyeventspublisher.Publisher
	// Services.
	membershipCache    *membership.Cache
	registerRuntimeSvc *registerruntime.Service
	engineVersionSvc   *engineversionsvc.Service
	stopRuntimeSvc     *adminstop.Service
	forceNextTurnSvc   *adminforce.Service
	patchRuntimeSvc    *adminpatch.Service
	banishRaceSvc      *adminbanish.Service
	livenessSvc        *livenessreply.Service
	commandExecuteSvc  *commandexecute.Service
	orderPutSvc        *orderput.Service
	reportGetSvc       *reportget.Service
	// Workers.
	schedulerTicker      *schedulerticker.Worker
	healthEventsConsumer *healtheventsconsumer.Worker
	// closers releases adapter-level resources at runtime shutdown.
	// Appended in construction order; close() runs them in LIFO order.
	closers []func() error
}
// newWiring constructs the process-level dependency set. It validates
// every required collaborator so callers can rely on them being
// non-nil. Construction proceeds in four phases: persistence stores,
// external adapters, services, workers. Each phase is in its own
// helper to keep the function readable.
//
// redisClient, pgPool and telemetryRuntime are mandatory and rejected
// with an error when nil. clock and logger are optional and fall back
// to time.Now and slog.Default respectively.
func newWiring(
	cfg config.Config,
	redisClient *redis.Client,
	pgPool *sql.DB,
	clock func() time.Time,
	logger *slog.Logger,
	telemetryRuntime *telemetry.Runtime,
) (*wiring, error) {
	if redisClient == nil {
		return nil, errors.New("new gamemaster wiring: nil redis client")
	}
	if pgPool == nil {
		return nil, errors.New("new gamemaster wiring: nil postgres pool")
	}
	if clock == nil {
		clock = time.Now
	}
	if logger == nil {
		logger = slog.Default()
	}
	if telemetryRuntime == nil {
		// errors.New, not fmt.Errorf: the message is static, matching
		// the other nil-guards above (fmt.Errorf without verbs was an
		// inconsistency flagged by staticcheck-style linting).
		return nil, errors.New("new gamemaster wiring: nil telemetry runtime")
	}
	w := &wiring{
		cfg:         cfg,
		redisClient: redisClient,
		pgPool:      pgPool,
		clock:       clock,
		logger:      logger,
		telemetry:   telemetryRuntime,
	}
	// Phase order matters: stores feed adapters, adapters feed
	// services, services feed workers.
	if err := w.buildPersistence(); err != nil {
		return nil, fmt.Errorf("new gamemaster wiring: persistence: %w", err)
	}
	if err := w.buildAdapters(); err != nil {
		return nil, fmt.Errorf("new gamemaster wiring: adapters: %w", err)
	}
	if err := w.buildServices(); err != nil {
		return nil, fmt.Errorf("new gamemaster wiring: services: %w", err)
	}
	if err := w.buildWorkers(); err != nil {
		return nil, fmt.Errorf("new gamemaster wiring: workers: %w", err)
	}
	return w, nil
}
// buildPersistence constructs the four PostgreSQL stores plus the
// Redis-backed stream-offset store. Every store shares the connection
// pools opened by the runtime; the runtime — not the wiring — owns
// their lifecycles, so nothing here registers a closer.
func (w *wiring) buildPersistence() error {
	opTimeout := w.cfg.Postgres.Conn.OperationTimeout
	var err error
	if w.runtimeRecords, err = runtimerecordstore.New(runtimerecordstore.Config{
		DB:               w.pgPool,
		OperationTimeout: opTimeout,
	}); err != nil {
		return fmt.Errorf("runtime record store: %w", err)
	}
	if w.engineVersions, err = engineversionstore.New(engineversionstore.Config{
		DB:               w.pgPool,
		OperationTimeout: opTimeout,
	}); err != nil {
		return fmt.Errorf("engine version store: %w", err)
	}
	if w.playerMappings, err = playermappingstore.New(playermappingstore.Config{
		DB:               w.pgPool,
		OperationTimeout: opTimeout,
	}); err != nil {
		return fmt.Errorf("player mapping store: %w", err)
	}
	if w.operationLogs, err = operationlog.New(operationlog.Config{
		DB:               w.pgPool,
		OperationTimeout: opTimeout,
	}); err != nil {
		return fmt.Errorf("operation log store: %w", err)
	}
	// The offset store rides on Redis rather than Postgres and takes
	// no operation timeout of its own.
	if w.streamOffsets, err = streamoffsets.New(streamoffsets.Config{Client: w.redisClient}); err != nil {
		return fmt.Errorf("stream offset store: %w", err)
	}
	return nil
}
// buildAdapters constructs the HTTP clients (engine, Lobby, Runtime
// Manager) and the two Redis Stream publishers. Each HTTP client's
// `Close` hook is appended to w.closers so idle TCP connections are
// released on shutdown; the stream publishers hold no resources of
// their own and register nothing.
func (w *wiring) buildAdapters() error {
	var err error
	if w.engineClient, err = engineclient.NewClient(engineclient.Config{
		CallTimeout:  w.cfg.EngineClient.CallTimeout,
		ProbeTimeout: w.cfg.EngineClient.ProbeTimeout,
	}); err != nil {
		return fmt.Errorf("engine client: %w", err)
	}
	w.closers = append(w.closers, w.engineClient.Close)
	if w.lobbyClient, err = lobbyclient.NewClient(lobbyclient.Config{
		BaseURL:        w.cfg.Lobby.BaseURL,
		RequestTimeout: w.cfg.Lobby.Timeout,
	}); err != nil {
		return fmt.Errorf("lobby client: %w", err)
	}
	w.closers = append(w.closers, w.lobbyClient.Close)
	if w.rtmClient, err = rtmclient.NewClient(rtmclient.Config{
		BaseURL:        w.cfg.RTM.BaseURL,
		RequestTimeout: w.cfg.RTM.Timeout,
	}); err != nil {
		return fmt.Errorf("rtm client: %w", err)
	}
	w.closers = append(w.closers, w.rtmClient.Close)
	if w.notificationPublisher, err = notificationpublisher.NewPublisher(notificationpublisher.Config{
		Client: w.redisClient,
		Stream: w.cfg.Streams.NotificationIntents,
	}); err != nil {
		return fmt.Errorf("notification publisher: %w", err)
	}
	if w.lobbyEventsPublisher, err = lobbyeventspublisher.NewPublisher(lobbyeventspublisher.Config{
		Client: w.redisClient,
		Stream: w.cfg.Streams.LobbyEvents,
	}); err != nil {
		return fmt.Errorf("lobby events publisher: %w", err)
	}
	return nil
}
// buildServices constructs every service-layer collaborator consumed
// by the REST listener and the workers. Ordering constraints: the
// membership cache must exist before the command/order/report
// services, and turngeneration must exist before adminforce (which
// wraps it). Everything else is order-independent.
func (w *wiring) buildServices() error {
	var err error
	if w.membershipCache, err = membership.NewCache(membership.Dependencies{
		Lobby:     w.lobbyClient,
		Telemetry: w.telemetry,
		Logger:    w.logger,
		Clock:     w.clock,
		TTL:       w.cfg.MembershipCache.TTL,
		MaxGames:  w.cfg.MembershipCache.MaxGames,
	}); err != nil {
		return fmt.Errorf("membership cache: %w", err)
	}
	// The scheduler has no dependencies and cannot fail.
	w.schedulerSvc = scheduler.New()
	if w.registerRuntimeSvc, err = registerruntime.NewService(registerruntime.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		EngineVersions: w.engineVersions,
		PlayerMappings: w.playerMappings,
		OperationLogs:  w.operationLogs,
		Engine:         w.engineClient,
		LobbyEvents:    w.lobbyEventsPublisher,
		Telemetry:      w.telemetry,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("register runtime service: %w", err)
	}
	if w.engineVersionSvc, err = engineversionsvc.NewService(engineversionsvc.Dependencies{
		EngineVersions: w.engineVersions,
		OperationLogs:  w.operationLogs,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("engine version service: %w", err)
	}
	if w.turnGenerationSvc, err = turngeneration.NewService(turngeneration.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		PlayerMappings: w.playerMappings,
		OperationLogs:  w.operationLogs,
		Engine:         w.engineClient,
		LobbyEvents:    w.lobbyEventsPublisher,
		Notifications:  w.notificationPublisher,
		Lobby:          w.lobbyClient,
		Scheduler:      w.schedulerSvc,
		Telemetry:      w.telemetry,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("turn generation service: %w", err)
	}
	if w.stopRuntimeSvc, err = adminstop.NewService(adminstop.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		OperationLogs:  w.operationLogs,
		RTM:            w.rtmClient,
		LobbyEvents:    w.lobbyEventsPublisher,
		Telemetry:      w.telemetry,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("admin stop service: %w", err)
	}
	// adminforce delegates to the turn-generation service built above.
	if w.forceNextTurnSvc, err = adminforce.NewService(adminforce.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		OperationLogs:  w.operationLogs,
		TurnGeneration: w.turnGenerationSvc,
		Telemetry:      w.telemetry,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("admin force service: %w", err)
	}
	if w.patchRuntimeSvc, err = adminpatch.NewService(adminpatch.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		EngineVersions: w.engineVersions,
		OperationLogs:  w.operationLogs,
		RTM:            w.rtmClient,
		Telemetry:      w.telemetry,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("admin patch service: %w", err)
	}
	if w.banishRaceSvc, err = adminbanish.NewService(adminbanish.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		PlayerMappings: w.playerMappings,
		OperationLogs:  w.operationLogs,
		Engine:         w.engineClient,
		Telemetry:      w.telemetry,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("admin banish service: %w", err)
	}
	if w.livenessSvc, err = livenessreply.NewService(livenessreply.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		Logger:         w.logger,
	}); err != nil {
		return fmt.Errorf("liveness reply service: %w", err)
	}
	// The three player-facing services share the same dependency
	// shape: stores, membership cache and engine client.
	if w.commandExecuteSvc, err = commandexecute.NewService(commandexecute.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		PlayerMappings: w.playerMappings,
		Membership:     w.membershipCache,
		Engine:         w.engineClient,
		Telemetry:      w.telemetry,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("command execute service: %w", err)
	}
	if w.orderPutSvc, err = orderput.NewService(orderput.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		PlayerMappings: w.playerMappings,
		Membership:     w.membershipCache,
		Engine:         w.engineClient,
		Telemetry:      w.telemetry,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("put orders service: %w", err)
	}
	if w.reportGetSvc, err = reportget.NewService(reportget.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		PlayerMappings: w.playerMappings,
		Membership:     w.membershipCache,
		Engine:         w.engineClient,
		Telemetry:      w.telemetry,
		Logger:         w.logger,
		Clock:          w.clock,
	}); err != nil {
		return fmt.Errorf("get report service: %w", err)
	}
	return nil
}
// buildWorkers constructs the long-lived components started by
// `App.Run` alongside the listener: the per-second scheduler ticker
// and the runtime:health_events consumer. Both depend on services
// and stores, so this phase runs last.
func (w *wiring) buildWorkers() error {
	var err error
	if w.schedulerTicker, err = schedulerticker.NewWorker(schedulerticker.Dependencies{
		RuntimeRecords: w.runtimeRecords,
		TurnGeneration: w.turnGenerationSvc,
		Telemetry:      w.telemetry,
		Interval:       w.cfg.Scheduler.TickInterval,
		Clock:          w.clock,
		Logger:         w.logger,
	}); err != nil {
		return fmt.Errorf("scheduler ticker: %w", err)
	}
	if w.healthEventsConsumer, err = healtheventsconsumer.NewWorker(healtheventsconsumer.Dependencies{
		Client:         w.redisClient,
		Stream:         w.cfg.Streams.HealthEvents,
		BlockTimeout:   w.cfg.Streams.BlockTimeout,
		OffsetStore:    w.streamOffsets,
		RuntimeRecords: w.runtimeRecords,
		LobbyEvents:    w.lobbyEventsPublisher,
		Telemetry:      w.telemetry,
		Clock:          w.clock,
		Logger:         w.logger,
	}); err != nil {
		return fmt.Errorf("health events consumer: %w", err)
	}
	return nil
}
// close releases adapter-level resources owned by the wiring layer.
// Closers run in reverse (LIFO) registration order so the resource
// opened last is released first. Every closer runs even when earlier
// ones fail; the failures are combined via errors.Join (nil when all
// succeed). The caller invokes this once during process shutdown —
// the slice is cleared so a second call is a harmless no-op.
func (w *wiring) close() error {
	var errs []error
	for i := len(w.closers); i > 0; i-- {
		if err := w.closers[i-1](); err != nil {
			errs = append(errs, err)
		}
	}
	w.closers = nil
	return errors.Join(errs...)
}