feat: gamemaster

Ilia Denisov, 2026-05-03 07:59:03 +02:00 (committed by GitHub)
parent a7cee15115
commit 3e2622757e
229 changed files with 41521 additions and 1098 deletions
@@ -0,0 +1,170 @@
// Package app wires the Game Master process lifecycle and coordinates
// component startup and graceful shutdown.
package app
import (
"context"
"errors"
"fmt"
"sync"
"galaxy/gamemaster/internal/config"
)
// Component is a long-lived Game Master subsystem that participates in
// coordinated startup and graceful shutdown.
type Component interface {
// Run starts the component and blocks until it stops.
Run(context.Context) error
// Shutdown stops the component within the provided timeout-bounded
// context.
Shutdown(context.Context) error
}
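// For illustration only (no such component ships in this commit), a
// minimal Component could tick until canceled and have nothing to
// release on shutdown:
//
//    type heartbeat struct{ interval time.Duration }
//
//    func (h *heartbeat) Run(ctx context.Context) error {
//        ticker := time.NewTicker(h.interval)
//        defer ticker.Stop()
//        for {
//            select {
//            case <-ctx.Done():
//                return ctx.Err()
//            case <-ticker.C:
//                // Emit one heartbeat per tick.
//            }
//        }
//    }
//
//    func (h *heartbeat) Shutdown(context.Context) error { return nil }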
// App owns the process-level lifecycle of Game Master and its registered
// components.
type App struct {
cfg config.Config
components []Component
}
// New constructs an App that stores a defensive copy of the supplied
// components.
func New(cfg config.Config, components ...Component) *App {
clonedComponents := append([]Component(nil), components...)
return &App{
cfg: cfg,
components: clonedComponents,
}
}
// Run starts all configured components, waits for cancellation or the
// first component failure, and then executes best-effort graceful
// shutdown.
func (app *App) Run(ctx context.Context) error {
if ctx == nil {
return errors.New("run gamemaster app: nil context")
}
if err := app.validate(); err != nil {
return err
}
if len(app.components) == 0 {
<-ctx.Done()
return nil
}
runCtx, cancel := context.WithCancel(ctx)
defer cancel()
results := make(chan componentResult, len(app.components))
var runWaitGroup sync.WaitGroup
for index, component := range app.components {
runWaitGroup.Add(1)
go func(componentIndex int, component Component) {
defer runWaitGroup.Done()
results <- componentResult{
index: componentIndex,
err: component.Run(runCtx),
}
}(index, component)
}
var runErr error
select {
case <-ctx.Done():
case result := <-results:
runErr = classifyComponentResult(ctx, result)
}
cancel()
shutdownErr := app.shutdownComponents()
waitErr := app.waitForComponents(&runWaitGroup)
return errors.Join(runErr, shutdownErr, waitErr)
}
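// A typical call site cancels ctx on SIGINT/SIGTERM (a sketch; the
// signal wiring and the application variable are assumptions, not part
// of this package):
//
//    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
//    defer stop()
//    if err := application.Run(ctx); err != nil {
//        log.Fatal(err)
//    }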
type componentResult struct {
index int
err error
}
func (app *App) validate() error {
if app.cfg.ShutdownTimeout <= 0 {
return fmt.Errorf("run gamemaster app: shutdown timeout must be positive, got %s", app.cfg.ShutdownTimeout)
}
for index, component := range app.components {
if component == nil {
return fmt.Errorf("run gamemaster app: component %d is nil", index)
}
}
return nil
}
func classifyComponentResult(parentCtx context.Context, result componentResult) error {
switch {
case result.err == nil:
if parentCtx.Err() != nil {
return nil
}
return fmt.Errorf("run gamemaster app: component %d exited without error before shutdown", result.index)
case errors.Is(result.err, context.Canceled) && parentCtx.Err() != nil:
return nil
default:
return fmt.Errorf("run gamemaster app: component %d: %w", result.index, result.err)
}
}
func (app *App) shutdownComponents() error {
var shutdownWaitGroup sync.WaitGroup
errs := make(chan error, len(app.components))
for index, component := range app.components {
shutdownWaitGroup.Add(1)
go func(componentIndex int, component Component) {
defer shutdownWaitGroup.Done()
shutdownCtx, cancel := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
defer cancel()
if err := component.Shutdown(shutdownCtx); err != nil {
errs <- fmt.Errorf("shutdown gamemaster component %d: %w", componentIndex, err)
}
}(index, component)
}
shutdownWaitGroup.Wait()
close(errs)
var joined error
for err := range errs {
joined = errors.Join(joined, err)
}
return joined
}
func (app *App) waitForComponents(runWaitGroup *sync.WaitGroup) error {
done := make(chan struct{})
go func() {
runWaitGroup.Wait()
close(done)
}()
waitCtx, cancel := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
defer cancel()
select {
case <-done:
return nil
case <-waitCtx.Done():
return fmt.Errorf("wait for gamemaster components: %w", waitCtx.Err())
}
}
@@ -0,0 +1,125 @@
package app
import (
"context"
"errors"
"strings"
"sync/atomic"
"testing"
"time"
"galaxy/gamemaster/internal/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type fakeComponent struct {
runErr error
shutdownErr error
runHook func(context.Context) error
shutdownHook func(context.Context) error
runCount atomic.Int32
downCount atomic.Int32
blockForCtx bool
}
func (component *fakeComponent) Run(ctx context.Context) error {
component.runCount.Add(1)
if component.runHook != nil {
return component.runHook(ctx)
}
if component.blockForCtx {
<-ctx.Done()
return ctx.Err()
}
return component.runErr
}
func (component *fakeComponent) Shutdown(ctx context.Context) error {
component.downCount.Add(1)
if component.shutdownHook != nil {
return component.shutdownHook(ctx)
}
return component.shutdownErr
}
func newCfg() config.Config {
return config.Config{ShutdownTimeout: time.Second}
}
func TestAppRunWithoutComponentsBlocksUntilContextDone(t *testing.T) {
t.Parallel()
app := New(newCfg())
ctx, cancel := context.WithCancel(context.Background())
cancel()
require.NoError(t, app.Run(ctx))
}
func TestAppRunReturnsOnContextCancel(t *testing.T) {
t.Parallel()
component := &fakeComponent{blockForCtx: true}
app := New(newCfg(), component)
ctx, cancel := context.WithCancel(context.Background())
go func() {
time.Sleep(10 * time.Millisecond)
cancel()
}()
require.NoError(t, app.Run(ctx))
assert.EqualValues(t, 1, component.runCount.Load())
assert.EqualValues(t, 1, component.downCount.Load())
}
func TestAppRunPropagatesComponentFailure(t *testing.T) {
t.Parallel()
failure := errors.New("boom")
component := &fakeComponent{runErr: failure}
app := New(newCfg(), component)
err := app.Run(context.Background())
require.Error(t, err)
require.ErrorIs(t, err, failure)
assert.EqualValues(t, 1, component.downCount.Load())
}
func TestAppRunFailsOnNilContext(t *testing.T) {
t.Parallel()
app := New(newCfg())
var ctx context.Context
require.Error(t, app.Run(ctx))
}
func TestAppRunFailsOnNonPositiveShutdownTimeout(t *testing.T) {
t.Parallel()
app := New(config.Config{}, &fakeComponent{})
require.Error(t, app.Run(context.Background()))
}
func TestAppRunFailsOnNilComponent(t *testing.T) {
t.Parallel()
app := New(newCfg(), nil)
require.Error(t, app.Run(context.Background()))
}
func TestAppRunFlagsCleanExitBeforeShutdown(t *testing.T) {
t.Parallel()
component := &fakeComponent{}
app := New(newCfg(), component)
err := app.Run(context.Background())
require.ErrorContains(t, err, "exited without error")
}
@@ -0,0 +1,45 @@
package app
import (
"context"
"errors"
"galaxy/redisconn"
"galaxy/gamemaster/internal/config"
"galaxy/gamemaster/internal/telemetry"
"github.com/redis/go-redis/v9"
)
// newRedisClient builds the master Redis client from cfg via the shared
// `pkg/redisconn` helper. Replica clients are not opened in this iteration
// per ARCHITECTURE.md §Persistence Backends; they will be wired when read
// routing is introduced.
func newRedisClient(cfg config.RedisConfig) *redis.Client {
return redisconn.NewMasterClient(cfg.Conn)
}
// instrumentRedisClient attaches the OpenTelemetry tracing and metrics
// instrumentation to client when telemetryRuntime is available. The
// actual instrumentation lives in `pkg/redisconn` so every Galaxy service
// shares one surface.
func instrumentRedisClient(redisClient *redis.Client, telemetryRuntime *telemetry.Runtime) error {
if redisClient == nil {
return errors.New("instrument redis client: nil client")
}
if telemetryRuntime == nil {
return nil
}
return redisconn.Instrument(redisClient,
redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()),
redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()),
)
}
// pingRedis performs a single Redis PING bounded by
// cfg.Conn.OperationTimeout to confirm that the configured Redis endpoint
// is reachable at startup.
func pingRedis(ctx context.Context, cfg config.RedisConfig, redisClient *redis.Client) error {
return redisconn.Ping(ctx, redisClient, cfg.Conn.OperationTimeout)
}
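// Taken together, the intended startup order (mirroring what NewRuntime
// in runtime.go does) is construct, instrument, then ping:
//
//    redisClient := newRedisClient(cfg.Redis)
//    if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
//        return err
//    }
//    if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
//        return err
//    }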
@@ -0,0 +1,238 @@
package app
import (
"context"
"database/sql"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/postgres"
"galaxy/redisconn"
"galaxy/gamemaster/internal/adapters/postgres/migrations"
"galaxy/gamemaster/internal/api/internalhttp"
"galaxy/gamemaster/internal/config"
"galaxy/gamemaster/internal/telemetry"
"github.com/redis/go-redis/v9"
)
// Runtime owns the runnable Game Master process plus the cleanup
// functions that release runtime resources after shutdown.
type Runtime struct {
cfg config.Config
app *App
wiring *wiring
internalServer *internalhttp.Server
cleanupFns []func() error
}
// NewRuntime constructs the runnable Game Master process from cfg.
//
// The runtime opens one shared `*redis.Client`, one `*sql.DB`, and one
// OpenTelemetry runtime; all are released in reverse construction order
// on shutdown. Embedded goose migrations apply synchronously after the
// PostgreSQL pool is opened and pinged, before any listener is constructed.
func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*Runtime, error) {
if ctx == nil {
return nil, errors.New("new gamemaster runtime: nil context")
}
if err := cfg.Validate(); err != nil {
return nil, fmt.Errorf("new gamemaster runtime: %w", err)
}
if logger == nil {
logger = slog.Default()
}
runtime := &Runtime{
cfg: cfg,
}
cleanupOnError := func(err error) (*Runtime, error) {
if cleanupErr := runtime.Close(); cleanupErr != nil {
return nil, fmt.Errorf("%w; cleanup: %w", err, cleanupErr)
}
return nil, err
}
telemetryRuntime, err := telemetry.NewProcess(ctx, telemetry.ProcessConfig{
ServiceName: cfg.Telemetry.ServiceName,
TracesExporter: cfg.Telemetry.TracesExporter,
MetricsExporter: cfg.Telemetry.MetricsExporter,
TracesProtocol: cfg.Telemetry.TracesProtocol,
MetricsProtocol: cfg.Telemetry.MetricsProtocol,
StdoutTracesEnabled: cfg.Telemetry.StdoutTracesEnabled,
StdoutMetricsEnabled: cfg.Telemetry.StdoutMetricsEnabled,
}, logger)
if err != nil {
return cleanupOnError(fmt.Errorf("new gamemaster runtime: telemetry: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, func() error {
shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
defer cancel()
return telemetryRuntime.Shutdown(shutdownCtx)
})
redisClient := newRedisClient(cfg.Redis)
if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
return cleanupOnError(fmt.Errorf("new gamemaster runtime: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, func() error {
err := redisClient.Close()
if errors.Is(err, redis.ErrClosed) {
return nil
}
return err
})
if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
return cleanupOnError(fmt.Errorf("new gamemaster runtime: %w", err))
}
pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn,
postgres.WithTracerProvider(telemetryRuntime.TracerProvider()),
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
)
if err != nil {
return cleanupOnError(fmt.Errorf("new gamemaster runtime: open postgres: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close)
unregisterPGStats, err := postgres.InstrumentDBStats(pgPool,
postgres.WithMeterProvider(telemetryRuntime.MeterProvider()),
)
if err != nil {
return cleanupOnError(fmt.Errorf("new gamemaster runtime: instrument postgres: %w", err))
}
runtime.cleanupFns = append(runtime.cleanupFns, unregisterPGStats)
if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil {
return cleanupOnError(fmt.Errorf("new gamemaster runtime: ping postgres: %w", err))
}
if err := postgres.RunMigrations(ctx, pgPool, migrations.FS(), "."); err != nil {
return cleanupOnError(fmt.Errorf("new gamemaster runtime: run postgres migrations: %w", err))
}
wiring, err := newWiring(cfg, redisClient, pgPool, time.Now, logger, telemetryRuntime)
if err != nil {
return cleanupOnError(fmt.Errorf("new gamemaster runtime: wiring: %w", err))
}
runtime.wiring = wiring
runtime.cleanupFns = append(runtime.cleanupFns, wiring.close)
probe := newReadinessProbe(pgPool, redisClient, cfg)
internalServer, err := internalhttp.NewServer(internalhttp.Config{
Addr: cfg.InternalHTTP.Addr,
ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
ReadTimeout: cfg.InternalHTTP.ReadTimeout,
WriteTimeout: cfg.InternalHTTP.WriteTimeout,
IdleTimeout: cfg.InternalHTTP.IdleTimeout,
}, internalhttp.Dependencies{
Logger: logger,
Telemetry: telemetryRuntime,
Readiness: probe,
RuntimeRecords: wiring.runtimeRecords,
RegisterRuntime: wiring.registerRuntimeSvc,
ForceNextTurn: wiring.forceNextTurnSvc,
StopRuntime: wiring.stopRuntimeSvc,
PatchRuntime: wiring.patchRuntimeSvc,
BanishRace: wiring.banishRaceSvc,
InvalidateMemberships: wiring.membershipCache,
GameLiveness: wiring.livenessSvc,
EngineVersions: wiring.engineVersionSvc,
CommandExecute: wiring.commandExecuteSvc,
PutOrders: wiring.orderPutSvc,
GetReport: wiring.reportGetSvc,
})
if err != nil {
return cleanupOnError(fmt.Errorf("new gamemaster runtime: internal HTTP server: %w", err))
}
runtime.internalServer = internalServer
runtime.app = New(cfg,
internalServer,
wiring.schedulerTicker,
wiring.healthEventsConsumer,
)
return runtime, nil
}
// InternalServer returns the internal HTTP server owned by runtime. It is
// primarily exposed for tests; production code should not depend on it.
func (runtime *Runtime) InternalServer() *internalhttp.Server {
if runtime == nil {
return nil
}
return runtime.internalServer
}
// Run starts every registered component (the internal HTTP listener,
// the scheduler ticker, and the health events consumer) and blocks
// until ctx is canceled or one component fails.
func (runtime *Runtime) Run(ctx context.Context) error {
if runtime == nil {
return errors.New("run gamemaster runtime: nil runtime")
}
if ctx == nil {
return errors.New("run gamemaster runtime: nil context")
}
if runtime.app == nil {
return errors.New("run gamemaster runtime: nil app")
}
return runtime.app.Run(ctx)
}
// Close releases every runtime dependency in reverse construction order.
// Close is safe to call multiple times.
func (runtime *Runtime) Close() error {
if runtime == nil {
return nil
}
var joined error
for index := len(runtime.cleanupFns) - 1; index >= 0; index-- {
if err := runtime.cleanupFns[index](); err != nil {
joined = errors.Join(joined, err)
}
}
runtime.cleanupFns = nil
return joined
}
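// End-to-end lifecycle at a call site (a sketch; the real main package
// is outside this diff). NewRuntime cleans up after itself on failure,
// so callers only own Close on the success path:
//
//    gm, err := app.NewRuntime(ctx, cfg, logger)
//    if err != nil {
//        return err
//    }
//    defer gm.Close() // Safe to call again after Run returns.
//    return gm.Run(ctx)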
// readinessProbe pings every steady-state dependency the listener
// guards: PostgreSQL primary and Redis master.
type readinessProbe struct {
pgPool *sql.DB
redisClient *redis.Client
postgresTimeout time.Duration
redisTimeout time.Duration
}
func newReadinessProbe(pgPool *sql.DB, redisClient *redis.Client, cfg config.Config) *readinessProbe {
return &readinessProbe{
pgPool: pgPool,
redisClient: redisClient,
postgresTimeout: cfg.Postgres.Conn.OperationTimeout,
redisTimeout: cfg.Redis.Conn.OperationTimeout,
}
}
// Check pings PostgreSQL and Redis. The first failing dependency aborts
// the check so callers see a single, actionable error.
func (probe *readinessProbe) Check(ctx context.Context) error {
if err := postgres.Ping(ctx, probe.pgPool, probe.postgresTimeout); err != nil {
return err
}
return redisconn.Ping(ctx, probe.redisClient, probe.redisTimeout)
}
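// A readiness endpoint would call Check roughly like this (a sketch;
// the actual handler lives in internalhttp and may differ):
//
//    func readyHandler(probe *readinessProbe) http.HandlerFunc {
//        return func(w http.ResponseWriter, r *http.Request) {
//            if err := probe.Check(r.Context()); err != nil {
//                http.Error(w, err.Error(), http.StatusServiceUnavailable)
//                return
//            }
//            w.WriteHeader(http.StatusNoContent)
//        }
//    }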
@@ -0,0 +1,479 @@
package app
import (
"database/sql"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/gamemaster/internal/adapters/engineclient"
"galaxy/gamemaster/internal/adapters/lobbyclient"
"galaxy/gamemaster/internal/adapters/lobbyeventspublisher"
"galaxy/gamemaster/internal/adapters/notificationpublisher"
"galaxy/gamemaster/internal/adapters/postgres/engineversionstore"
"galaxy/gamemaster/internal/adapters/postgres/operationlog"
"galaxy/gamemaster/internal/adapters/postgres/playermappingstore"
"galaxy/gamemaster/internal/adapters/postgres/runtimerecordstore"
"galaxy/gamemaster/internal/adapters/redisstate/streamoffsets"
"galaxy/gamemaster/internal/adapters/rtmclient"
"galaxy/gamemaster/internal/config"
"galaxy/gamemaster/internal/service/adminbanish"
"galaxy/gamemaster/internal/service/adminforce"
"galaxy/gamemaster/internal/service/adminpatch"
"galaxy/gamemaster/internal/service/adminstop"
"galaxy/gamemaster/internal/service/commandexecute"
engineversionsvc "galaxy/gamemaster/internal/service/engineversion"
"galaxy/gamemaster/internal/service/livenessreply"
"galaxy/gamemaster/internal/service/membership"
"galaxy/gamemaster/internal/service/orderput"
"galaxy/gamemaster/internal/service/registerruntime"
"galaxy/gamemaster/internal/service/reportget"
"galaxy/gamemaster/internal/service/scheduler"
"galaxy/gamemaster/internal/service/turngeneration"
"galaxy/gamemaster/internal/telemetry"
"galaxy/gamemaster/internal/worker/healtheventsconsumer"
"galaxy/gamemaster/internal/worker/schedulerticker"
"github.com/redis/go-redis/v9"
)
// wiring owns the process-level singletons constructed once during
// `NewRuntime` and consumed by every worker and HTTP handler. Stage
// 19 grew the struct to hold every store, adapter, service and
// worker required by the listener and the long-lived components.
type wiring struct {
cfg config.Config
redisClient *redis.Client
pgPool *sql.DB
clock func() time.Time
logger *slog.Logger
telemetry *telemetry.Runtime
// Stores.
runtimeRecords *runtimerecordstore.Store
engineVersions *engineversionstore.Store
playerMappings *playermappingstore.Store
operationLogs *operationlog.Store
streamOffsets *streamoffsets.Store
// External adapters.
engineClient *engineclient.Client
lobbyClient *lobbyclient.Client
rtmClient *rtmclient.Client
notificationPublisher *notificationpublisher.Publisher
lobbyEventsPublisher *lobbyeventspublisher.Publisher
// Services.
membershipCache *membership.Cache
registerRuntimeSvc *registerruntime.Service
engineVersionSvc *engineversionsvc.Service
stopRuntimeSvc *adminstop.Service
forceNextTurnSvc *adminforce.Service
patchRuntimeSvc *adminpatch.Service
banishRaceSvc *adminbanish.Service
livenessSvc *livenessreply.Service
commandExecuteSvc *commandexecute.Service
orderPutSvc *orderput.Service
reportGetSvc *reportget.Service
schedulerSvc *scheduler.Service
turnGenerationSvc *turngeneration.Service
// Workers.
schedulerTicker *schedulerticker.Worker
healthEventsConsumer *healtheventsconsumer.Worker
// closers releases adapter-level resources at runtime shutdown.
closers []func() error
}
// newWiring constructs the process-level dependency set. It validates
// every required collaborator so callers can rely on them being
// non-nil. Construction proceeds in four phases: persistence stores,
// external adapters, services, workers. Each phase is in its own
// helper to keep the function readable.
func newWiring(
cfg config.Config,
redisClient *redis.Client,
pgPool *sql.DB,
clock func() time.Time,
logger *slog.Logger,
telemetryRuntime *telemetry.Runtime,
) (*wiring, error) {
if redisClient == nil {
return nil, errors.New("new gamemaster wiring: nil redis client")
}
if pgPool == nil {
return nil, errors.New("new gamemaster wiring: nil postgres pool")
}
if clock == nil {
clock = time.Now
}
if logger == nil {
logger = slog.Default()
}
if telemetryRuntime == nil {
return nil, fmt.Errorf("new gamemaster wiring: nil telemetry runtime")
}
w := &wiring{
cfg: cfg,
redisClient: redisClient,
pgPool: pgPool,
clock: clock,
logger: logger,
telemetry: telemetryRuntime,
}
if err := w.buildPersistence(); err != nil {
return nil, fmt.Errorf("new gamemaster wiring: persistence: %w", err)
}
if err := w.buildAdapters(); err != nil {
return nil, fmt.Errorf("new gamemaster wiring: adapters: %w", err)
}
if err := w.buildServices(); err != nil {
return nil, fmt.Errorf("new gamemaster wiring: services: %w", err)
}
if err := w.buildWorkers(); err != nil {
return nil, fmt.Errorf("new gamemaster wiring: workers: %w", err)
}
return w, nil
}
// buildPersistence constructs the four PostgreSQL stores plus the
// Redis-backed stream-offset store. The stores share the connection
// pools opened by the runtime; their lifecycles are owned by the
// runtime, not the wiring.
func (w *wiring) buildPersistence() error {
timeout := w.cfg.Postgres.Conn.OperationTimeout
runtimeRecords, err := runtimerecordstore.New(runtimerecordstore.Config{
DB: w.pgPool,
OperationTimeout: timeout,
})
if err != nil {
return fmt.Errorf("runtime record store: %w", err)
}
w.runtimeRecords = runtimeRecords
engineVersions, err := engineversionstore.New(engineversionstore.Config{
DB: w.pgPool,
OperationTimeout: timeout,
})
if err != nil {
return fmt.Errorf("engine version store: %w", err)
}
w.engineVersions = engineVersions
playerMappings, err := playermappingstore.New(playermappingstore.Config{
DB: w.pgPool,
OperationTimeout: timeout,
})
if err != nil {
return fmt.Errorf("player mapping store: %w", err)
}
w.playerMappings = playerMappings
operationLogs, err := operationlog.New(operationlog.Config{
DB: w.pgPool,
OperationTimeout: timeout,
})
if err != nil {
return fmt.Errorf("operation log store: %w", err)
}
w.operationLogs = operationLogs
streamOffsets, err := streamoffsets.New(streamoffsets.Config{Client: w.redisClient})
if err != nil {
return fmt.Errorf("stream offset store: %w", err)
}
w.streamOffsets = streamOffsets
return nil
}
// buildAdapters constructs the HTTP clients (engine, Lobby, Runtime
// Manager) and the two Redis Stream publishers. Their `Close` hooks
// are appended to w.closers so idle TCP connections are released on
// shutdown.
func (w *wiring) buildAdapters() error {
engine, err := engineclient.NewClient(engineclient.Config{
CallTimeout: w.cfg.EngineClient.CallTimeout,
ProbeTimeout: w.cfg.EngineClient.ProbeTimeout,
})
if err != nil {
return fmt.Errorf("engine client: %w", err)
}
w.engineClient = engine
w.closers = append(w.closers, engine.Close)
lobby, err := lobbyclient.NewClient(lobbyclient.Config{
BaseURL: w.cfg.Lobby.BaseURL,
RequestTimeout: w.cfg.Lobby.Timeout,
})
if err != nil {
return fmt.Errorf("lobby client: %w", err)
}
w.lobbyClient = lobby
w.closers = append(w.closers, lobby.Close)
rtm, err := rtmclient.NewClient(rtmclient.Config{
BaseURL: w.cfg.RTM.BaseURL,
RequestTimeout: w.cfg.RTM.Timeout,
})
if err != nil {
return fmt.Errorf("rtm client: %w", err)
}
w.rtmClient = rtm
w.closers = append(w.closers, rtm.Close)
notification, err := notificationpublisher.NewPublisher(notificationpublisher.Config{
Client: w.redisClient,
Stream: w.cfg.Streams.NotificationIntents,
})
if err != nil {
return fmt.Errorf("notification publisher: %w", err)
}
w.notificationPublisher = notification
lobbyEvents, err := lobbyeventspublisher.NewPublisher(lobbyeventspublisher.Config{
Client: w.redisClient,
Stream: w.cfg.Streams.LobbyEvents,
})
if err != nil {
return fmt.Errorf("lobby events publisher: %w", err)
}
w.lobbyEventsPublisher = lobbyEvents
return nil
}
// buildServices constructs every service-layer collaborator consumed
// by the REST listener and the workers. Construction order matters
// only between turngeneration → adminforce (the latter wraps the
// former) and between membership cache → command/order/report
// services.
func (w *wiring) buildServices() error {
cache, err := membership.NewCache(membership.Dependencies{
Lobby: w.lobbyClient,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
TTL: w.cfg.MembershipCache.TTL,
MaxGames: w.cfg.MembershipCache.MaxGames,
})
if err != nil {
return fmt.Errorf("membership cache: %w", err)
}
w.membershipCache = cache
w.schedulerSvc = scheduler.New()
registerSvc, err := registerruntime.NewService(registerruntime.Dependencies{
RuntimeRecords: w.runtimeRecords,
EngineVersions: w.engineVersions,
PlayerMappings: w.playerMappings,
OperationLogs: w.operationLogs,
Engine: w.engineClient,
LobbyEvents: w.lobbyEventsPublisher,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("register runtime service: %w", err)
}
w.registerRuntimeSvc = registerSvc
engineVersionSvc, err := engineversionsvc.NewService(engineversionsvc.Dependencies{
EngineVersions: w.engineVersions,
OperationLogs: w.operationLogs,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("engine version service: %w", err)
}
w.engineVersionSvc = engineVersionSvc
turnGen, err := turngeneration.NewService(turngeneration.Dependencies{
RuntimeRecords: w.runtimeRecords,
PlayerMappings: w.playerMappings,
OperationLogs: w.operationLogs,
Engine: w.engineClient,
LobbyEvents: w.lobbyEventsPublisher,
Notifications: w.notificationPublisher,
Lobby: w.lobbyClient,
Scheduler: w.schedulerSvc,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("turn generation service: %w", err)
}
w.turnGenerationSvc = turnGen
stopSvc, err := adminstop.NewService(adminstop.Dependencies{
RuntimeRecords: w.runtimeRecords,
OperationLogs: w.operationLogs,
RTM: w.rtmClient,
LobbyEvents: w.lobbyEventsPublisher,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("admin stop service: %w", err)
}
w.stopRuntimeSvc = stopSvc
forceSvc, err := adminforce.NewService(adminforce.Dependencies{
RuntimeRecords: w.runtimeRecords,
OperationLogs: w.operationLogs,
TurnGeneration: turnGen,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("admin force service: %w", err)
}
w.forceNextTurnSvc = forceSvc
patchSvc, err := adminpatch.NewService(adminpatch.Dependencies{
RuntimeRecords: w.runtimeRecords,
EngineVersions: w.engineVersions,
OperationLogs: w.operationLogs,
RTM: w.rtmClient,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("admin patch service: %w", err)
}
w.patchRuntimeSvc = patchSvc
banishSvc, err := adminbanish.NewService(adminbanish.Dependencies{
RuntimeRecords: w.runtimeRecords,
PlayerMappings: w.playerMappings,
OperationLogs: w.operationLogs,
Engine: w.engineClient,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("admin banish service: %w", err)
}
w.banishRaceSvc = banishSvc
livenessSvc, err := livenessreply.NewService(livenessreply.Dependencies{
RuntimeRecords: w.runtimeRecords,
Logger: w.logger,
})
if err != nil {
return fmt.Errorf("liveness reply service: %w", err)
}
w.livenessSvc = livenessSvc
commandSvc, err := commandexecute.NewService(commandexecute.Dependencies{
RuntimeRecords: w.runtimeRecords,
PlayerMappings: w.playerMappings,
Membership: cache,
Engine: w.engineClient,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("command execute service: %w", err)
}
w.commandExecuteSvc = commandSvc
orderSvc, err := orderput.NewService(orderput.Dependencies{
RuntimeRecords: w.runtimeRecords,
PlayerMappings: w.playerMappings,
Membership: cache,
Engine: w.engineClient,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("put orders service: %w", err)
}
w.orderPutSvc = orderSvc
reportSvc, err := reportget.NewService(reportget.Dependencies{
RuntimeRecords: w.runtimeRecords,
PlayerMappings: w.playerMappings,
Membership: cache,
Engine: w.engineClient,
Telemetry: w.telemetry,
Logger: w.logger,
Clock: w.clock,
})
if err != nil {
return fmt.Errorf("get report service: %w", err)
}
w.reportGetSvc = reportSvc
return nil
}
// buildWorkers constructs the long-lived components started by
// `App.Run` alongside the listener: the per-second scheduler ticker
// and the runtime:health_events consumer.
func (w *wiring) buildWorkers() error {
ticker, err := schedulerticker.NewWorker(schedulerticker.Dependencies{
RuntimeRecords: w.runtimeRecords,
TurnGeneration: w.turnGenerationSvc,
Telemetry: w.telemetry,
Interval: w.cfg.Scheduler.TickInterval,
Clock: w.clock,
Logger: w.logger,
})
if err != nil {
return fmt.Errorf("scheduler ticker: %w", err)
}
w.schedulerTicker = ticker
healthConsumer, err := healtheventsconsumer.NewWorker(healtheventsconsumer.Dependencies{
Client: w.redisClient,
Stream: w.cfg.Streams.HealthEvents,
BlockTimeout: w.cfg.Streams.BlockTimeout,
OffsetStore: w.streamOffsets,
RuntimeRecords: w.runtimeRecords,
LobbyEvents: w.lobbyEventsPublisher,
Telemetry: w.telemetry,
Clock: w.clock,
Logger: w.logger,
})
if err != nil {
return fmt.Errorf("health events consumer: %w", err)
}
w.healthEventsConsumer = healthConsumer
return nil
}
// close releases adapter-level resources owned by the wiring layer.
// Returns the joined error of every closer; the caller is expected
// to invoke this once during process shutdown. Closers run in LIFO
// order so the resource opened last is released first.
func (w *wiring) close() error {
var joined error
for index := len(w.closers) - 1; index >= 0; index-- {
if err := w.closers[index](); err != nil {
joined = errors.Join(joined, err)
}
}
w.closers = nil
return joined
}
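// Concretely, buildAdapters appends closers in the order engine, lobby,
// rtm; close therefore releases the RTM client first and the engine
// client last.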