feat: game lobby service

This commit is contained in:
Ilia Denisov
2026-04-25 23:20:55 +02:00
committed by GitHub
parent 32dc29359a
commit 48b0056b49
336 changed files with 57074 additions and 1418 deletions
+169
View File
@@ -0,0 +1,169 @@
// Package app wires the Game Lobby Service process lifecycle and
// coordinates component startup and graceful shutdown.
package app
import (
"context"
"errors"
"fmt"
"sync"
"galaxy/lobby/internal/config"
)
// Component is a long-lived Game Lobby Service subsystem that participates
// in coordinated startup and graceful shutdown.
type Component interface {
	// Run starts the component and blocks until it stops, returning the
	// terminal error (or nil on a clean exit).
	Run(context.Context) error
	// Shutdown stops the component within the provided timeout-bounded
	// context. It may be invoked while Run is still blocked.
	Shutdown(context.Context) error
}
// App owns the process-level lifecycle of Game Lobby Service and its
// registered components.
type App struct {
	// cfg supplies process-wide settings; Run requires a positive
	// cfg.ShutdownTimeout.
	cfg config.Config
	// components are started together by Run and shut down together.
	components []Component
}
// New constructs App with a defensive copy of the supplied components so
// later mutation of the caller's slice cannot affect the app.
func New(cfg config.Config, components ...Component) *App {
	owned := make([]Component, len(components))
	copy(owned, components)
	return &App{cfg: cfg, components: owned}
}
// Run starts all configured components, waits for cancellation or the first
// component failure, and then executes best-effort graceful shutdown.
func (app *App) Run(ctx context.Context) error {
	if ctx == nil {
		return errors.New("run lobby app: nil context")
	}
	if err := app.validate(); err != nil {
		return err
	}
	if len(app.components) == 0 {
		// Nothing to supervise: simply block until cancellation.
		<-ctx.Done()
		return nil
	}

	runCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Buffered so every component goroutine can report without blocking.
	results := make(chan componentResult, len(app.components))
	var wg sync.WaitGroup
	for i, comp := range app.components {
		wg.Add(1)
		go func(idx int, c Component) {
			defer wg.Done()
			results <- componentResult{index: idx, err: c.Run(runCtx)}
		}(i, comp)
	}

	// Wait for external cancellation or the first component to stop.
	var runErr error
	select {
	case <-ctx.Done():
	case res := <-results:
		runErr = classifyComponentResult(ctx, res)
	}

	cancel()
	shutdownErr := app.shutdownComponents()
	waitErr := app.waitForComponents(&wg)
	return errors.Join(runErr, shutdownErr, waitErr)
}
// componentResult pairs a component's registry index with the error its Run
// method returned, so failures can be attributed in error messages.
type componentResult struct {
	index int
	err   error
}
// validate rejects configurations the supervisor cannot honor: a
// non-positive shutdown timeout or any nil component.
func (app *App) validate() error {
	if timeout := app.cfg.ShutdownTimeout; timeout <= 0 {
		return fmt.Errorf("run lobby app: shutdown timeout must be positive, got %s", timeout)
	}
	for i := range app.components {
		if app.components[i] == nil {
			return fmt.Errorf("run lobby app: component %d is nil", i)
		}
	}
	return nil
}
// classifyComponentResult maps a component's exit into the supervisor's
// error policy: exits explained by parent-context cancellation are benign;
// everything else — including a premature clean exit — is reported.
func classifyComponentResult(parentCtx context.Context, result componentResult) error {
	parentDone := parentCtx.Err() != nil
	if result.err == nil {
		if parentDone {
			return nil
		}
		return fmt.Errorf("run lobby app: component %d exited without error before shutdown", result.index)
	}
	if parentDone && errors.Is(result.err, context.Canceled) {
		return nil
	}
	return fmt.Errorf("run lobby app: component %d: %w", result.index, result.err)
}
// shutdownComponents invokes Shutdown on every component concurrently, each
// bounded by its own cfg.ShutdownTimeout context, and joins any errors.
func (app *App) shutdownComponents() error {
	var wg sync.WaitGroup
	// Buffered so every goroutine can report an error without blocking.
	errs := make(chan error, len(app.components))
	for i, comp := range app.components {
		wg.Add(1)
		go func(idx int, c Component) {
			defer wg.Done()
			ctx, cancel := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
			defer cancel()
			if err := c.Shutdown(ctx); err != nil {
				errs <- fmt.Errorf("shutdown lobby component %d: %w", idx, err)
			}
		}(i, comp)
	}
	wg.Wait()
	close(errs)

	var joined error
	for err := range errs {
		joined = errors.Join(joined, err)
	}
	return joined
}
// waitForComponents blocks until every Run goroutine has returned or the
// shutdown timeout elapses, reporting the latter as an error.
func (app *App) waitForComponents(runWaitGroup *sync.WaitGroup) error {
	done := make(chan struct{})
	go func() {
		defer close(done)
		runWaitGroup.Wait()
	}()

	waitCtx, cancel := context.WithTimeout(context.Background(), app.cfg.ShutdownTimeout)
	defer cancel()
	select {
	case <-waitCtx.Done():
		return fmt.Errorf("wait for lobby components: %w", waitCtx.Err())
	case <-done:
		return nil
	}
}
+173
View File
@@ -0,0 +1,173 @@
package app
import (
"context"
"errors"
"sync/atomic"
"testing"
"time"
"galaxy/lobby/internal/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// fakeComponent is a configurable Component test double: it can fail from
// Run or Shutdown, delegate to hooks, or block until its context ends,
// while counting invocations of both methods.
type fakeComponent struct {
	runErr       error                       // returned by Run when no hook is set and not blocking
	shutdownErr  error                       // returned by Shutdown when no hook is set
	runHook      func(context.Context) error // overrides Run behavior entirely when set
	shutdownHook func(context.Context) error // overrides Shutdown behavior entirely when set
	runCount     atomic.Int32                // number of Run invocations
	downCount    atomic.Int32                // number of Shutdown invocations
	blockForCtx  bool                        // when true, Run blocks until ctx is done
}
// Run counts the call, then follows the first configured behavior:
// hook, block-until-cancel, or static error.
func (component *fakeComponent) Run(ctx context.Context) error {
	component.runCount.Add(1)
	switch {
	case component.runHook != nil:
		return component.runHook(ctx)
	case component.blockForCtx:
		<-ctx.Done()
		return ctx.Err()
	default:
		return component.runErr
	}
}
// Shutdown counts the call and delegates to the hook when configured,
// otherwise returns the static shutdown error.
func (component *fakeComponent) Shutdown(ctx context.Context) error {
	component.downCount.Add(1)
	if hook := component.shutdownHook; hook != nil {
		return hook(ctx)
	}
	return component.shutdownErr
}
// newCfg returns a minimal valid Config with a one-second shutdown timeout.
func newCfg() config.Config {
	var cfg config.Config
	cfg.ShutdownTimeout = time.Second
	return cfg
}
// TestAppValidateRejectsNonPositiveTimeout verifies that Run fails fast when
// the config carries a zero shutdown timeout.
func TestAppValidateRejectsNonPositiveTimeout(t *testing.T) {
	t.Parallel()
	lobby := New(config.Config{}, &fakeComponent{blockForCtx: true})
	runErr := lobby.Run(context.Background())
	require.Error(t, runErr)
	require.Contains(t, runErr.Error(), "shutdown timeout must be positive")
}
// TestAppValidateRejectsNilComponent verifies that a nil component entry is
// reported with its index.
func TestAppValidateRejectsNilComponent(t *testing.T) {
	t.Parallel()
	lobby := New(newCfg(), nil)
	runErr := lobby.Run(context.Background())
	require.Error(t, runErr)
	require.Contains(t, runErr.Error(), "component 0 is nil")
}
// TestAppRunCancelledByContext verifies that cancelling the parent context
// stops the app cleanly and triggers exactly one Run and one Shutdown.
func TestAppRunCancelledByContext(t *testing.T) {
	t.Parallel()
	blocker := &fakeComponent{blockForCtx: true}
	lobby := New(newCfg(), blocker)
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() { done <- lobby.Run(ctx) }()
	time.Sleep(10 * time.Millisecond)
	cancel()
	select {
	case runErr := <-done:
		require.NoError(t, runErr)
	case <-time.After(time.Second):
		t.Fatal("app did not stop after cancellation")
	}
	assert.Equal(t, int32(1), blocker.runCount.Load())
	assert.Equal(t, int32(1), blocker.downCount.Load())
}
// TestAppRunPropagatesComponentError verifies that one component's failure
// surfaces in Run's error and still shuts down its siblings.
func TestAppRunPropagatesComponentError(t *testing.T) {
	t.Parallel()
	broken := &fakeComponent{runErr: errors.New("boom")}
	steady := &fakeComponent{blockForCtx: true}
	runErr := New(newCfg(), broken, steady).Run(context.Background())
	require.Error(t, runErr)
	require.Contains(t, runErr.Error(), "boom")
	assert.Equal(t, int32(1), steady.downCount.Load())
}
// TestAppRunEarlyCleanExit verifies that a component returning nil before
// any shutdown signal is treated as a failure.
func TestAppRunEarlyCleanExit(t *testing.T) {
	t.Parallel()
	eager := &fakeComponent{} // returns immediately without error
	steady := &fakeComponent{blockForCtx: true}
	runErr := New(newCfg(), eager, steady).Run(context.Background())
	require.Error(t, runErr)
	require.Contains(t, runErr.Error(), "exited without error before shutdown")
}
// TestAppRunShutdownCollectsErrors verifies that errors raised during
// graceful shutdown are joined into Run's returned error.
func TestAppRunShutdownCollectsErrors(t *testing.T) {
	t.Parallel()
	faulty := &fakeComponent{
		blockForCtx: true,
		shutdownErr: errors.New("shutdown-boom"),
	}
	lobby := New(newCfg(), faulty)
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() { done <- lobby.Run(ctx) }()
	time.Sleep(10 * time.Millisecond)
	cancel()
	runErr := <-done
	require.Error(t, runErr)
	require.Contains(t, runErr.Error(), "shutdown-boom")
}
// TestAppRunNoComponentsBlocksUntilCancel verifies that an empty app simply
// waits for cancellation and then exits cleanly.
func TestAppRunNoComponentsBlocksUntilCancel(t *testing.T) {
	t.Parallel()
	lobby := New(newCfg())
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() { done <- lobby.Run(ctx) }()
	time.Sleep(10 * time.Millisecond)
	cancel()
	select {
	case runErr := <-done:
		require.NoError(t, runErr)
	case <-time.After(time.Second):
		t.Fatal("app did not exit after cancel")
	}
}
// TestAppRunNilContext verifies the explicit nil-context guard in App.Run.
func TestAppRunNilContext(t *testing.T) {
	t.Parallel()
	lobby := New(newCfg(), &fakeComponent{blockForCtx: true})
	runErr := lobby.Run(nil) //nolint:staticcheck // test exercises the nil-context guard.
	require.Error(t, runErr)
	require.Contains(t, runErr.Error(), "nil context")
}
+71
View File
@@ -0,0 +1,71 @@
package app
import (
"context"
"fmt"
"galaxy/lobby/internal/config"
"galaxy/lobby/internal/telemetry"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
)
// newRedisClient builds a Redis client wired with the configured timeouts
// and TLS settings taken from cfg.
func newRedisClient(cfg config.RedisConfig) *redis.Client {
	options := &redis.Options{
		Addr:      cfg.Addr,
		Username:  cfg.Username,
		Password:  cfg.Password,
		DB:        cfg.DB,
		TLSConfig: cfg.TLSConfig(),
	}
	// A single operation timeout bounds dialing, reads, and writes alike.
	options.DialTimeout = cfg.OperationTimeout
	options.ReadTimeout = cfg.OperationTimeout
	options.WriteTimeout = cfg.OperationTimeout
	return redis.NewClient(options)
}
// instrumentRedisClient attaches OpenTelemetry tracing and metrics
// instrumentation to client; a nil telemetryRuntime makes the call a no-op.
func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error {
	if client == nil {
		return fmt.Errorf("instrument redis client: nil client")
	}
	if telemetryRuntime == nil {
		return nil
	}
	tracingErr := redisotel.InstrumentTracing(
		client,
		redisotel.WithTracerProvider(telemetryRuntime.TracerProvider()),
		redisotel.WithDBStatement(false),
	)
	if tracingErr != nil {
		return fmt.Errorf("instrument redis client tracing: %w", tracingErr)
	}
	metricsErr := redisotel.InstrumentMetrics(
		client,
		redisotel.WithMeterProvider(telemetryRuntime.MeterProvider()),
	)
	if metricsErr != nil {
		return fmt.Errorf("instrument redis client metrics: %w", metricsErr)
	}
	return nil
}
// pingRedis performs a single Redis PING bounded by cfg.OperationTimeout to
// confirm that the configured Redis endpoint is reachable at startup.
func pingRedis(ctx context.Context, cfg config.RedisConfig, client *redis.Client) error {
	if client == nil {
		return fmt.Errorf("ping redis: nil client")
	}
	boundedCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout)
	defer cancel()
	if err := client.Ping(boundedCtx).Err(); err != nil {
		return fmt.Errorf("ping redis: %w", err)
	}
	return nil
}
+72
View File
@@ -0,0 +1,72 @@
package app
import (
"context"
"testing"
"time"
"galaxy/lobby/internal/config"
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/require"
)
// TestPingRedisSucceedsAgainstMiniredis verifies a successful PING against
// an in-process miniredis backend.
func TestPingRedisSucceedsAgainstMiniredis(t *testing.T) {
	t.Parallel()
	backend := miniredis.RunT(t)
	cfg := config.RedisConfig{
		Addr:             backend.Addr(),
		OperationTimeout: time.Second,
	}
	redisClient := newRedisClient(cfg)
	t.Cleanup(func() { _ = redisClient.Close() })
	require.NoError(t, pingRedis(context.Background(), cfg, redisClient))
}
// TestPingRedisReturnsErrorWhenClosed verifies that pinging through a closed
// client reports an error.
func TestPingRedisReturnsErrorWhenClosed(t *testing.T) {
	t.Parallel()
	backend := miniredis.RunT(t)
	cfg := config.RedisConfig{
		Addr:             backend.Addr(),
		OperationTimeout: time.Second,
	}
	redisClient := newRedisClient(cfg)
	require.NoError(t, redisClient.Close())
	require.Error(t, pingRedis(context.Background(), cfg, redisClient))
}
// TestPingRedisNilClient verifies the nil-client guard in pingRedis.
func TestPingRedisNilClient(t *testing.T) {
	t.Parallel()
	cfg := config.RedisConfig{OperationTimeout: time.Second}
	pingErr := pingRedis(context.Background(), cfg, nil)
	require.Error(t, pingErr)
	require.Contains(t, pingErr.Error(), "nil client")
}
// TestInstrumentRedisClientNilClient verifies the nil-client guard in
// instrumentRedisClient.
func TestInstrumentRedisClientNilClient(t *testing.T) {
	t.Parallel()
	instrumentErr := instrumentRedisClient(nil, nil)
	require.Error(t, instrumentErr)
	require.Contains(t, instrumentErr.Error(), "nil client")
}
// TestInstrumentRedisClientNilTelemetryIsNoop verifies that instrumentation
// silently does nothing when no telemetry runtime is supplied.
func TestInstrumentRedisClientNilTelemetryIsNoop(t *testing.T) {
	t.Parallel()
	backend := miniredis.RunT(t)
	redisClient := newRedisClient(config.RedisConfig{
		Addr:             backend.Addr(),
		OperationTimeout: time.Second,
	})
	t.Cleanup(func() { _ = redisClient.Close() })
	require.NoError(t, instrumentRedisClient(redisClient, nil))
}
+280
View File
@@ -0,0 +1,280 @@
package app
import (
"context"
"errors"
"fmt"
"log/slog"
"time"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/api/internalhttp"
"galaxy/lobby/internal/api/publichttp"
"galaxy/lobby/internal/config"
"galaxy/lobby/internal/domain/game"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
)
// activeGamesProbe adapts ports.GameStore to telemetry.ActiveGamesProbe by
// converting domain status keys into the string-typed map the telemetry
// runtime consumes.
type activeGamesProbe struct {
	// games is the store whose per-status game counts are exposed.
	games ports.GameStore
}
// CountByStatus returns the per-status game counts with the domain status
// keys converted to plain strings for telemetry consumption.
func (probe activeGamesProbe) CountByStatus(ctx context.Context) (map[string]int, error) {
	byStatus, err := probe.games.CountByStatus(ctx)
	if err != nil {
		return nil, err
	}
	converted := make(map[string]int, len(byStatus))
	for status, total := range byStatus {
		converted[string(status)] = total
	}
	return converted, nil
}
// Compile-time check that activeGamesProbe satisfies the telemetry probe
// interface.
var _ telemetry.ActiveGamesProbe = activeGamesProbe{}

// Referencing game.AllStatuses keeps this file coupled to the frozen
// game.Status vocabulary. NOTE(review): this only asserts that the symbol
// exists; it does NOT verify that the probe's key set or the telemetry
// attribute documentation covers every status — that still requires a test
// or manual review whenever a new status is introduced.
var _ = game.AllStatuses
// Runtime owns the runnable Game Lobby Service process plus the cleanup
// functions that release runtime resources after shutdown.
type Runtime struct {
	cfg            config.Config        // validated process configuration
	app            *App                 // component supervisor driven by Run
	wiring         *wiring              // concrete adapter/service graph
	publicServer   *publichttp.Server   // player-facing HTTP listener
	internalServer *internalhttp.Server // internal/operator HTTP listener
	cleanupFns     []func() error       // resource releases, run in reverse by Close
}
// NewRuntime constructs the runnable Game Lobby Service process from cfg.
//
// Construction order matters: telemetry first (so later failures are still
// observable and cleaned up), then Redis, then the service wiring and the
// two HTTP servers. Every acquired resource appends a cleanup function to
// runtime.cleanupFns; on any construction failure cleanupOnError closes the
// partial runtime before returning.
//
// A nil logger falls back to slog.Default().
func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*Runtime, error) {
	if ctx == nil {
		return nil, errors.New("new lobby runtime: nil context")
	}
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new lobby runtime: %w", err)
	}
	if logger == nil {
		logger = slog.Default()
	}
	runtime := &Runtime{
		cfg: cfg,
	}
	// cleanupOnError releases everything acquired so far and folds any
	// cleanup failure into the original construction error.
	cleanupOnError := func(err error) (*Runtime, error) {
		if cleanupErr := runtime.Close(); cleanupErr != nil {
			return nil, fmt.Errorf("%w; cleanup: %w", err, cleanupErr)
		}
		return nil, err
	}
	telemetryRuntime, err := telemetry.NewProcess(ctx, telemetry.ProcessConfig{
		ServiceName:          cfg.Telemetry.ServiceName,
		TracesExporter:       cfg.Telemetry.TracesExporter,
		MetricsExporter:      cfg.Telemetry.MetricsExporter,
		TracesProtocol:       cfg.Telemetry.TracesProtocol,
		MetricsProtocol:      cfg.Telemetry.MetricsProtocol,
		StdoutTracesEnabled:  cfg.Telemetry.StdoutTracesEnabled,
		StdoutMetricsEnabled: cfg.Telemetry.StdoutMetricsEnabled,
	}, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: telemetry: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		// Telemetry shutdown is bounded so Close cannot hang on export.
		shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
		defer cancel()
		return telemetryRuntime.Shutdown(shutdownCtx)
	})
	redisClient := newRedisClient(cfg.Redis)
	if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		return redisClient.Close()
	})
	// Fail fast when the Redis endpoint is unreachable at startup.
	if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: %w", err))
	}
	// NOTE: named serviceWiring (not wiring) to avoid shadowing the
	// package-level wiring type.
	serviceWiring, err := newWiring(cfg, redisClient, time.Now, logger, telemetryRuntime)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: wiring: %w", err))
	}
	runtime.wiring = serviceWiring
	streamLagProbe, err := redisstate.NewStreamLagProbe(redisClient, time.Now)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: stream lag probe: %w", err))
	}
	if err := telemetryRuntime.RegisterGauges(telemetry.GaugeDependencies{
		ActiveGames: activeGamesProbe{games: serviceWiring.gameStore},
		StreamLag:   streamLagProbe,
		Offsets:     serviceWiring.streamOffsetStore,
		GMEvents: telemetry.StreamGaugeBinding{
			OffsetLabel: "gm_lobby_events",
			StreamName:  cfg.Redis.GMEventsStream,
		},
		RuntimeResults: telemetry.StreamGaugeBinding{
			OffsetLabel: "runtime_results",
			StreamName:  cfg.Redis.RuntimeJobResultsStream,
		},
		UserLifecycle: telemetry.StreamGaugeBinding{
			OffsetLabel: "user_lifecycle",
			StreamName:  cfg.Redis.UserLifecycleStream,
		},
		Logger: logger,
	}); err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: register gauges: %w", err))
	}
	publicServer, err := publichttp.NewServer(publichttp.Config{
		Addr:              cfg.PublicHTTP.Addr,
		ReadHeaderTimeout: cfg.PublicHTTP.ReadHeaderTimeout,
		ReadTimeout:       cfg.PublicHTTP.ReadTimeout,
		IdleTimeout:       cfg.PublicHTTP.IdleTimeout,
	}, publichttp.Dependencies{
		Logger:             logger,
		Telemetry:          telemetryRuntime,
		CreateGame:         serviceWiring.createGame,
		UpdateGame:         serviceWiring.updateGame,
		OpenEnrollment:     serviceWiring.openEnrollment,
		CancelGame:         serviceWiring.cancelGame,
		ManualReadyToStart: serviceWiring.manualReadyToStart,
		StartGame:          serviceWiring.startGame,
		RetryStartGame:     serviceWiring.retryStartGame,
		PauseGame:          serviceWiring.pauseGame,
		ResumeGame:         serviceWiring.resumeGame,
		SubmitApplication:  serviceWiring.submitApplication,
		ApproveApplication: serviceWiring.approveApplication,
		RejectApplication:  serviceWiring.rejectApplication,
		CreateInvite:       serviceWiring.createInvite,
		RedeemInvite:       serviceWiring.redeemInvite,
		DeclineInvite:      serviceWiring.declineInvite,
		RevokeInvite:       serviceWiring.revokeInvite,
		RemoveMember:       serviceWiring.removeMember,
		BlockMember:        serviceWiring.blockMember,
		RegisterRaceName:   serviceWiring.registerRaceName,
		ListMyRaceNames:    serviceWiring.listMyRaceNames,
		GetGame:            serviceWiring.getGame,
		ListGames:          serviceWiring.listGames,
		ListMemberships:    serviceWiring.listMemberships,
		ListMyGames:        serviceWiring.listMyGames,
		ListMyApplications: serviceWiring.listMyApplications,
		ListMyInvites:      serviceWiring.listMyInvites,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: public HTTP server: %w", err))
	}
	runtime.publicServer = publicServer
	internalServer, err := internalhttp.NewServer(internalhttp.Config{
		Addr:              cfg.InternalHTTP.Addr,
		ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
		ReadTimeout:       cfg.InternalHTTP.ReadTimeout,
		IdleTimeout:       cfg.InternalHTTP.IdleTimeout,
	}, internalhttp.Dependencies{
		Logger:             logger,
		Telemetry:          telemetryRuntime,
		CreateGame:         serviceWiring.createGame,
		UpdateGame:         serviceWiring.updateGame,
		OpenEnrollment:     serviceWiring.openEnrollment,
		CancelGame:         serviceWiring.cancelGame,
		ManualReadyToStart: serviceWiring.manualReadyToStart,
		StartGame:          serviceWiring.startGame,
		RetryStartGame:     serviceWiring.retryStartGame,
		PauseGame:          serviceWiring.pauseGame,
		ResumeGame:         serviceWiring.resumeGame,
		ApproveApplication: serviceWiring.approveApplication,
		RejectApplication:  serviceWiring.rejectApplication,
		RemoveMember:       serviceWiring.removeMember,
		BlockMember:        serviceWiring.blockMember,
		GetGame:            serviceWiring.getGame,
		ListGames:          serviceWiring.listGames,
		ListMemberships:    serviceWiring.listMemberships,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: internal HTTP server: %w", err))
	}
	runtime.internalServer = internalServer
	// The supervisor owns both HTTP servers and every background worker.
	runtime.app = New(
		cfg,
		publicServer,
		internalServer,
		serviceWiring.enrollmentAutomation,
		serviceWiring.runtimeJobResultConsumer,
		serviceWiring.gmEventsConsumer,
		serviceWiring.pendingRegistration,
		serviceWiring.userLifecycleConsumer,
	)
	return runtime, nil
}
// PublicServer returns the public HTTP server owned by runtime. It is
// primarily exposed for tests; production code should not depend on it.
// Safe to call on a nil receiver.
func (runtime *Runtime) PublicServer() *publichttp.Server {
	if runtime != nil {
		return runtime.publicServer
	}
	return nil
}
// InternalServer returns the internal HTTP server owned by runtime. It is
// primarily exposed for tests; production code should not depend on it.
// Safe to call on a nil receiver.
func (runtime *Runtime) InternalServer() *internalhttp.Server {
	if runtime != nil {
		return runtime.internalServer
	}
	return nil
}
// Run serves the public and internal HTTP listeners until ctx is canceled or
// one component fails. The guard order (context, receiver, app) matches the
// original contract and is relied on by tests.
func (runtime *Runtime) Run(ctx context.Context) error {
	switch {
	case ctx == nil:
		return errors.New("run lobby runtime: nil context")
	case runtime == nil:
		return errors.New("run lobby runtime: nil runtime")
	case runtime.app == nil:
		return errors.New("run lobby runtime: nil app")
	}
	return runtime.app.Run(ctx)
}
// Close releases every runtime dependency in reverse construction order.
// Close is safe to call multiple times and on a nil receiver.
func (runtime *Runtime) Close() error {
	if runtime == nil {
		return nil
	}
	var joined error
	for i := len(runtime.cleanupFns); i > 0; i-- {
		if err := runtime.cleanupFns[i-1](); err != nil {
			joined = errors.Join(joined, err)
		}
	}
	// Drop the slice so a second Close is a no-op.
	runtime.cleanupFns = nil
	return joined
}
+159
View File
@@ -0,0 +1,159 @@
package app
import (
"context"
"io"
"log/slog"
"net"
"net/http"
"os"
"testing"
"time"
"galaxy/lobby/internal/api/internalhttp"
"galaxy/lobby/internal/api/publichttp"
"galaxy/lobby/internal/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
testcontainers "github.com/testcontainers/testcontainers-go"
rediscontainer "github.com/testcontainers/testcontainers-go/modules/redis"
)
const (
	// realRuntimeSmokeEnv gates the Docker-dependent smoke suite; set it
	// to "1" to run TestRealRuntimeCompatibility.
	realRuntimeSmokeEnv = "LOBBY_REAL_RUNTIME_SMOKE"
	// realRuntimeRedisImage is the container image used for the real
	// Redis backend.
	realRuntimeRedisImage = "redis:7"
)
// TestRealRuntimeCompatibility boots the full Runtime against a real Redis
// container, verifies that both HTTP listeners serve /healthz and /readyz,
// and asserts graceful shutdown on context cancellation. The test is skipped
// unless LOBBY_REAL_RUNTIME_SMOKE=1 because it depends on Docker.
func TestRealRuntimeCompatibility(t *testing.T) {
	if os.Getenv(realRuntimeSmokeEnv) != "1" {
		t.Skipf("set %s=1 to run the real runtime smoke suite", realRuntimeSmokeEnv)
	}
	ctx := context.Background()
	container, err := rediscontainer.Run(ctx, realRuntimeRedisImage)
	require.NoError(t, err)
	testcontainers.CleanupContainer(t, container)
	redisAddr, err := container.Endpoint(ctx, "")
	require.NoError(t, err)

	// Point everything external at unreachable or disabled endpoints
	// except the containerized Redis.
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisAddr
	cfg.UserService.BaseURL = "http://127.0.0.1:1"
	cfg.GM.BaseURL = "http://127.0.0.1:1"
	cfg.PublicHTTP.Addr = mustFreeAddr(t)
	cfg.InternalHTTP.Addr = mustFreeAddr(t)
	cfg.ShutdownTimeout = 2 * time.Second
	cfg.Telemetry.TracesExporter = "none"
	cfg.Telemetry.MetricsExporter = "none"

	lobbyRuntime, err := NewRuntime(context.Background(), cfg, testLogger())
	require.NoError(t, err)
	defer func() {
		require.NoError(t, lobbyRuntime.Close())
	}()

	runCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	runErrCh := make(chan error, 1)
	go func() { runErrCh <- lobbyRuntime.Run(runCtx) }()

	httpClient := newTestHTTPClient(t)
	waitForRuntimeReady(t, httpClient, cfg.PublicHTTP.Addr, publichttp.ReadyzPath)
	waitForRuntimeReady(t, httpClient, cfg.InternalHTTP.Addr, internalhttp.ReadyzPath)
	assertHTTPStatus(t, httpClient, "http://"+cfg.PublicHTTP.Addr+publichttp.HealthzPath, http.StatusOK)
	assertHTTPStatus(t, httpClient, "http://"+cfg.PublicHTTP.Addr+publichttp.ReadyzPath, http.StatusOK)
	assertHTTPStatus(t, httpClient, "http://"+cfg.InternalHTTP.Addr+internalhttp.HealthzPath, http.StatusOK)
	assertHTTPStatus(t, httpClient, "http://"+cfg.InternalHTTP.Addr+internalhttp.ReadyzPath, http.StatusOK)

	cancel()
	waitForRunResult(t, runErrCh, cfg.ShutdownTimeout+2*time.Second)
}
func testLogger() *slog.Logger {
return slog.New(slog.NewTextHandler(io.Discard, nil))
}
// newTestHTTPClient builds a short-timeout HTTP client whose transport is
// torn down when the test finishes.
func newTestHTTPClient(t *testing.T) *http.Client {
	t.Helper()
	transport := &http.Transport{DisableKeepAlives: true}
	t.Cleanup(transport.CloseIdleConnections)
	client := &http.Client{
		Timeout:   500 * time.Millisecond,
		Transport: transport,
	}
	return client
}
// waitForRuntimeReady polls GET http://addr+path until it answers 200 OK,
// failing the test if that does not happen within five seconds.
func waitForRuntimeReady(t *testing.T, client *http.Client, addr string, path string) {
	t.Helper()
	probe := func() bool {
		req, err := http.NewRequest(http.MethodGet, "http://"+addr+path, nil)
		if err != nil {
			return false
		}
		resp, err := client.Do(req)
		if err != nil {
			return false
		}
		defer resp.Body.Close()
		// Drain so the connection can be reused/closed cleanly.
		_, _ = io.Copy(io.Discard, resp.Body)
		return resp.StatusCode == http.StatusOK
	}
	require.Eventually(t, probe, 5*time.Second, 25*time.Millisecond, "lobby runtime did not become reachable on %s", addr)
}
// waitForRunResult waits up to waitTimeout for Run to report a result and
// asserts it was nil.
func waitForRunResult(t *testing.T, runErrCh <-chan error, waitTimeout time.Duration) {
	t.Helper()
	var runErr error
	received := func() bool {
		select {
		case runErr = <-runErrCh:
			return true
		default:
			return false
		}
	}
	require.Eventually(t, received, waitTimeout, 10*time.Millisecond, "lobby runtime did not stop")
	require.NoError(t, runErr)
}
// assertHTTPStatus issues a GET to target, drains the body, and requires the
// given status code.
func assertHTTPStatus(t *testing.T, client *http.Client, target string, want int) {
	t.Helper()
	req, err := http.NewRequest(http.MethodGet, target, nil)
	require.NoError(t, err)
	resp, err := client.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()
	_, _ = io.Copy(io.Discard, resp.Body)
	require.Equal(t, want, resp.StatusCode)
}
// mustFreeAddr reserves an ephemeral loopback port and returns its address.
// The listener is closed before the caller binds the port, so the port is
// only very likely — not guaranteed — to remain free.
func mustFreeAddr(t *testing.T) string {
	t.Helper()
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	reserved := listener.Addr().String()
	defer func() {
		assert.NoError(t, listener.Close())
	}()
	return reserved
}
+151
View File
@@ -0,0 +1,151 @@
package app
import (
"context"
"net"
"net/http"
"testing"
"time"
"galaxy/lobby/internal/api/internalhttp"
"galaxy/lobby/internal/api/publichttp"
"galaxy/lobby/internal/config"
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newTestConfig builds a valid Config that listens on ephemeral ports and
// uses the Redis endpoint given by redisAddr (typically a miniredis
// instance provided by the caller).
func newTestConfig(t *testing.T, redisAddr string) config.Config {
	t.Helper()
	ephemeralAddr := func() string {
		listener, err := net.Listen("tcp", "127.0.0.1:0")
		require.NoError(t, err)
		reserved := listener.Addr().String()
		require.NoError(t, listener.Close())
		return reserved
	}
	cfg := config.DefaultConfig()
	cfg.Redis.Addr = redisAddr
	cfg.UserService.BaseURL = "http://127.0.0.1:1"
	cfg.GM.BaseURL = "http://127.0.0.1:1"
	cfg.PublicHTTP.Addr = ephemeralAddr()
	cfg.InternalHTTP.Addr = ephemeralAddr()
	return cfg
}
// TestNewRuntimeValidatesContext verifies the nil-context guard in
// NewRuntime.
func TestNewRuntimeValidatesContext(t *testing.T) {
	t.Parallel()
	_, newErr := NewRuntime(nil, config.Config{}, nil) //nolint:staticcheck // test exercises the nil-context guard.
	require.Error(t, newErr)
	require.Contains(t, newErr.Error(), "nil context")
}
// TestNewRuntimeRejectsInvalidConfig verifies that config validation
// failures abort construction.
func TestNewRuntimeRejectsInvalidConfig(t *testing.T) {
	t.Parallel()
	_, newErr := NewRuntime(context.Background(), config.Config{}, nil)
	require.Error(t, newErr)
	require.Contains(t, newErr.Error(), "new lobby runtime")
}
// TestNewRuntimeSucceedsWithMiniredis verifies full construction against an
// in-process Redis and that both HTTP servers are wired.
func TestNewRuntimeSucceedsWithMiniredis(t *testing.T) {
	backend := miniredis.RunT(t)
	lobbyRuntime, newErr := NewRuntime(context.Background(), newTestConfig(t, backend.Addr()), nil)
	require.NoError(t, newErr)
	require.NotNil(t, lobbyRuntime)
	t.Cleanup(func() { _ = lobbyRuntime.Close() })
	assert.NotNil(t, lobbyRuntime.PublicServer())
	assert.NotNil(t, lobbyRuntime.InternalServer())
}
// TestNewRuntimeWiresRaceNameDirectory verifies that wiring exposes a
// non-nil race name directory after construction.
func TestNewRuntimeWiresRaceNameDirectory(t *testing.T) {
	backend := miniredis.RunT(t)
	lobbyRuntime, newErr := NewRuntime(context.Background(), newTestConfig(t, backend.Addr()), nil)
	require.NoError(t, newErr)
	t.Cleanup(func() { _ = lobbyRuntime.Close() })
	require.NotNil(t, lobbyRuntime.wiring)
	assert.NotNil(t, lobbyRuntime.wiring.raceNameDirectory)
}
// TestNewRuntimeFailsWhenRedisUnreachable verifies that the startup ping
// aborts construction when Redis cannot be reached.
func TestNewRuntimeFailsWhenRedisUnreachable(t *testing.T) {
	t.Parallel()
	// Port 1 on loopback is never listening.
	cfg := newTestConfig(t, "127.0.0.1:1")
	cfg.Redis.OperationTimeout = 100 * time.Millisecond
	_, newErr := NewRuntime(context.Background(), cfg, nil)
	require.Error(t, newErr)
	require.Contains(t, newErr.Error(), "ping redis")
}
// TestRuntimeCloseIsIdempotent verifies that calling Close twice succeeds.
func TestRuntimeCloseIsIdempotent(t *testing.T) {
	backend := miniredis.RunT(t)
	lobbyRuntime, newErr := NewRuntime(context.Background(), newTestConfig(t, backend.Addr()), nil)
	require.NoError(t, newErr)
	require.NoError(t, lobbyRuntime.Close())
	require.NoError(t, lobbyRuntime.Close())
}
// TestRuntimeRunServesProbesAndStopsOnCancel verifies that both listeners
// answer their health/readiness probes and that Run returns nil after the
// context is canceled.
func TestRuntimeRunServesProbesAndStopsOnCancel(t *testing.T) {
	backend := miniredis.RunT(t)
	cfg := newTestConfig(t, backend.Addr())
	lobbyRuntime, newErr := NewRuntime(context.Background(), cfg, nil)
	require.NoError(t, newErr)
	t.Cleanup(func() { _ = lobbyRuntime.Close() })

	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	done := make(chan error, 1)
	go func() { done <- lobbyRuntime.Run(ctx) }()

	// Wait until both servers have bound their listeners.
	require.Eventually(t, func() bool {
		return lobbyRuntime.PublicServer().Addr() != "" && lobbyRuntime.InternalServer().Addr() != ""
	}, 2*time.Second, 10*time.Millisecond)

	probes := []struct {
		label string
		url   string
	}{
		{"public healthz", "http://" + lobbyRuntime.PublicServer().Addr() + publichttp.HealthzPath},
		{"public readyz", "http://" + lobbyRuntime.PublicServer().Addr() + publichttp.ReadyzPath},
		{"internal healthz", "http://" + lobbyRuntime.InternalServer().Addr() + internalhttp.HealthzPath},
		{"internal readyz", "http://" + lobbyRuntime.InternalServer().Addr() + internalhttp.ReadyzPath},
	}
	for _, probe := range probes {
		resp, getErr := http.Get(probe.url)
		require.NoError(t, getErr, probe.label)
		_ = resp.Body.Close()
		assert.Equal(t, http.StatusOK, resp.StatusCode, probe.label)
	}

	cancel()
	select {
	case runErr := <-done:
		require.NoError(t, runErr)
	case <-time.After(3 * time.Second):
		t.Fatal("runtime did not stop after cancel")
	}
}
// TestRuntimeRunNilContext verifies both nil guards in Runtime.Run: a nil
// context is rejected (the context check runs before the receiver check, so
// it fires even on a nil receiver), and a nil receiver is rejected when a
// valid context is supplied. The original body never passed a nil context
// despite the test's name — it only exercised the nil-receiver path.
func TestRuntimeRunNilContext(t *testing.T) {
	t.Parallel()
	var runtime *Runtime
	err := runtime.Run(nil) //nolint:staticcheck // test exercises the nil-context guard.
	require.Error(t, err)
	require.Contains(t, err.Error(), "nil context")
	require.Error(t, runtime.Run(context.Background()))
}
+785
View File
@@ -0,0 +1,785 @@
package app
import (
"errors"
"fmt"
"log/slog"
"time"
"galaxy/lobby/internal/adapters/gmclient"
"galaxy/lobby/internal/adapters/idgen"
"galaxy/lobby/internal/adapters/metricsintentpub"
"galaxy/lobby/internal/adapters/metricsracenamedir"
"galaxy/lobby/internal/adapters/racenameintents"
"galaxy/lobby/internal/adapters/racenamestub"
"galaxy/lobby/internal/adapters/redisstate"
"galaxy/lobby/internal/adapters/runtimemanager"
"galaxy/lobby/internal/adapters/userlifecycle"
"galaxy/lobby/internal/adapters/userservice"
"galaxy/lobby/internal/config"
"galaxy/lobby/internal/domain/racename"
"galaxy/lobby/internal/ports"
"galaxy/lobby/internal/telemetry"
"galaxy/lobby/internal/service/approveapplication"
"galaxy/lobby/internal/service/blockmember"
"galaxy/lobby/internal/service/cancelgame"
"galaxy/lobby/internal/service/createinvite"
"galaxy/lobby/internal/service/creategame"
"galaxy/lobby/internal/service/declineinvite"
"galaxy/lobby/internal/service/getgame"
"galaxy/lobby/internal/service/listgames"
"galaxy/lobby/internal/service/listmemberships"
"galaxy/lobby/internal/service/listmyapplications"
"galaxy/lobby/internal/service/listmygames"
"galaxy/lobby/internal/service/listmyinvites"
"galaxy/lobby/internal/service/listmyracenames"
"galaxy/lobby/internal/service/manualreadytostart"
"galaxy/lobby/internal/service/openenrollment"
"galaxy/lobby/internal/service/pausegame"
"galaxy/lobby/internal/service/redeeminvite"
"galaxy/lobby/internal/service/registerracename"
"galaxy/lobby/internal/service/rejectapplication"
"galaxy/lobby/internal/service/removemember"
"galaxy/lobby/internal/service/resumegame"
"galaxy/lobby/internal/service/capabilityevaluation"
"galaxy/lobby/internal/service/retrystartgame"
"galaxy/lobby/internal/service/revokeinvite"
"galaxy/lobby/internal/service/startgame"
"galaxy/lobby/internal/service/submitapplication"
"galaxy/lobby/internal/service/updategame"
"galaxy/lobby/internal/worker/enrollmentautomation"
"galaxy/lobby/internal/worker/gmevents"
"galaxy/lobby/internal/worker/pendingregistration"
"galaxy/lobby/internal/worker/runtimejobresult"
userlifecycleworker "galaxy/lobby/internal/worker/userlifecycle"
"galaxy/notificationintent"
"github.com/redis/go-redis/v9"
)
// wiring owns the process-level singletons that downstream service
// constructors resolve through their port interfaces. It is the single
// place in the process where concrete adapter types are referenced, so
// service code always depends on ports rather than on specific adapters.
//
// The struct gathers the Redis-backed stores, the gap-activation store,
// the User Service client, the notification intent publisher, and every
// application service and long-lived worker the process runs.
type wiring struct {
	// policy is the lobby-owned Race Name Directory canonical-key
	// policy, shared between the RND adapter and any future service
	// that needs to call Canonicalize directly.
	policy *racename.Policy
	// raceNameDirectory is the platform-wide in-game name uniqueness
	// arbiter.
	raceNameDirectory ports.RaceNameDirectory
	// gameStore persists game records.
	gameStore ports.GameStore
	// applicationStore persists application records.
	applicationStore ports.ApplicationStore
	// inviteStore persists invite records.
	inviteStore ports.InviteStore
	// membershipStore persists membership records.
	membershipStore ports.MembershipStore
	// gapActivationStore records when a game's gap window opens.
	gapActivationStore ports.GapActivationStore
	// userService is the synchronous User Service eligibility client.
	userService ports.UserService
	// intentPublisher publishes notification intents to
	// notification:intents.
	intentPublisher ports.IntentPublisher
	// idGenerator produces opaque identifiers for new records.
	idGenerator ports.IDGenerator
	// createGame handles `lobby.game.create`.
	createGame *creategame.Service
	// updateGame handles `lobby.game.update`.
	updateGame *updategame.Service
	// openEnrollment handles `lobby.game.open_enrollment`.
	openEnrollment *openenrollment.Service
	// cancelGame handles `lobby.game.cancel`.
	cancelGame *cancelgame.Service
	// manualReadyToStart handles `lobby.game.ready_to_start`.
	manualReadyToStart *manualreadytostart.Service
	// enrollmentAutomation drives the periodic auto-close worker.
	enrollmentAutomation *enrollmentautomation.Worker
	// submitApplication handles `lobby.application.submit`.
	submitApplication *submitapplication.Service
	// approveApplication handles `lobby.application.approve`.
	approveApplication *approveapplication.Service
	// rejectApplication handles `lobby.application.reject`.
	rejectApplication *rejectapplication.Service
	// createInvite handles `lobby.invite.create`.
	createInvite *createinvite.Service
	// redeemInvite handles `lobby.invite.redeem`.
	redeemInvite *redeeminvite.Service
	// declineInvite handles `lobby.invite.decline`.
	declineInvite *declineinvite.Service
	// revokeInvite handles `lobby.invite.revoke`.
	revokeInvite *revokeinvite.Service
	// runtimeManager publishes start and stop jobs to Runtime Manager.
	runtimeManager ports.RuntimeManager
	// gmClient registers running games with Game Master.
	gmClient ports.GMClient
	// streamOffsetStore persists Redis Streams consumer progress.
	streamOffsetStore ports.StreamOffsetStore
	// startGame handles `lobby.game.start`.
	startGame *startgame.Service
	// retryStartGame handles `lobby.game.retry_start`.
	retryStartGame *retrystartgame.Service
	// pauseGame handles `lobby.game.pause`.
	pauseGame *pausegame.Service
	// resumeGame handles `lobby.game.resume`.
	resumeGame *resumegame.Service
	// removeMember handles `lobby.membership.remove`.
	removeMember *removemember.Service
	// blockMember handles `lobby.membership.block`.
	blockMember *blockmember.Service
	// registerRaceName handles `lobby.race_name.register`.
	registerRaceName *registerracename.Service
	// listMyRaceNames handles `lobby.race_names.list`.
	listMyRaceNames *listmyracenames.Service
	// getGame handles `lobby.game.get`.
	getGame *getgame.Service
	// listGames handles `lobby.games.list`.
	listGames *listgames.Service
	// listMemberships handles `lobby.memberships.list`.
	listMemberships *listmemberships.Service
	// listMyGames handles `lobby.my_games.list`.
	listMyGames *listmygames.Service
	// listMyApplications handles `lobby.my_applications.list`.
	listMyApplications *listmyapplications.Service
	// listMyInvites handles `lobby.my_invites.list`.
	listMyInvites *listmyinvites.Service
	// runtimeJobResultConsumer consumes runtime:job_results and drives
	// the post-start sequence.
	runtimeJobResultConsumer *runtimejobresult.Consumer
	// gameTurnStatsStore persists the per-game per-user stats aggregate
	// fed by every runtime_snapshot_update event.
	gameTurnStatsStore ports.GameTurnStatsStore
	// evaluationGuardStore stores the per-game «already evaluated»
	// marker that keeps replayed game_finished events safe.
	evaluationGuardStore ports.EvaluationGuardStore
	// capabilityEvaluation runs the capability evaluator at
	// game finish.
	capabilityEvaluation *capabilityevaluation.Service
	// gmEventsConsumer consumes gm:lobby_events and applies snapshot
	// updates plus capability evaluation handoff.
	gmEventsConsumer *gmevents.Consumer
	// pendingRegistration releases expired Race Name Directory
	// pending_registration entries on a periodic tick.
	pendingRegistration *pendingregistration.Worker
	// userLifecycleConsumer reads `user:lifecycle_events` from User
	// Service and dispatches each entry to userLifecycleWorker. The
	// consumer is the long-lived Component registered with app.New;
	// the worker has no goroutine of its own.
	userLifecycleConsumer *userlifecycle.Consumer
	// userLifecycleWorker runs the cascade triggered by each lifecycle
	// event.
	userLifecycleWorker *userlifecycleworker.Worker
}
// newWiring constructs the process-level dependency set: the Race Name
// Directory, the Redis-backed stores, the outbound clients, and every
// application service and long-lived worker, then returns them bundled
// in a *wiring. Construction fails fast: the first error aborts with a
// uniform "new lobby wiring" prefix. A nil clock defaults to time.Now
// and a nil logger to slog.Default; redisClient is required.
func newWiring(
	cfg config.Config,
	redisClient *redis.Client,
	clock func() time.Time,
	logger *slog.Logger,
	telemetryRuntime *telemetry.Runtime,
) (*wiring, error) {
	// Validate the mandatory Redis client before constructing any
	// adapter that depends on it (previously this was checked only
	// after the race-name directory had already been built).
	if redisClient == nil {
		return nil, errors.New("new lobby wiring: nil redis client")
	}
	policy, err := racename.NewPolicy()
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	if clock == nil {
		clock = time.Now
	}
	if logger == nil {
		logger = slog.Default()
	}
	rawDirectory, err := buildRaceNameDirectory(cfg, redisClient, policy, clock)
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	// Wrap the directory so every call is counted by telemetry.
	directory := metricsracenamedir.New(rawDirectory, telemetryRuntime)
	// Redis-backed persistence adapters.
	gameStore, err := redisstate.NewGameStore(redisClient)
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	applicationStore, err := redisstate.NewApplicationStore(redisClient)
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	inviteStore, err := redisstate.NewInviteStore(redisClient)
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	membershipStore, err := redisstate.NewMembershipStore(redisClient)
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	gapActivationStore, err := redisstate.NewGapActivationStore(redisClient)
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	// Outbound synchronous client for User Service eligibility checks.
	userServiceClient, err := userservice.NewClient(userservice.Config{
		BaseURL: cfg.UserService.BaseURL,
		Timeout: cfg.UserService.Timeout,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	rawIntentPublisher, err := notificationintent.NewPublisher(notificationintent.PublisherConfig{
		Client: redisClient,
		Stream: cfg.Redis.NotificationIntentsStream,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	// Wrap the publisher so every intent publish is counted by telemetry.
	intentPublisher := metricsintentpub.New(rawIntentPublisher, telemetryRuntime)
	ids := idgen.NewGenerator()
	// Application services, in rough command-handler order.
	createSvc, err := creategame.NewService(creategame.Dependencies{
		Games:  gameStore,
		IDs:    ids,
		Clock:  clock,
		Logger: logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	updateSvc, err := updategame.NewService(updategame.Dependencies{
		Games:  gameStore,
		Clock:  clock,
		Logger: logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	openSvc, err := openenrollment.NewService(openenrollment.Dependencies{
		Games:     gameStore,
		Clock:     clock,
		Logger:    logger,
		Telemetry: telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	cancelSvc, err := cancelgame.NewService(cancelgame.Dependencies{
		Games:     gameStore,
		Clock:     clock,
		Logger:    logger,
		Telemetry: telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	manualReadySvc, err := manualreadytostart.NewService(manualreadytostart.Dependencies{
		Games:       gameStore,
		Memberships: membershipStore,
		Invites:     inviteStore,
		Intents:     intentPublisher,
		Clock:       clock,
		Logger:      logger,
		Telemetry:   telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	submitSvc, err := submitapplication.NewService(submitapplication.Dependencies{
		Games:        gameStore,
		Memberships:  membershipStore,
		Applications: applicationStore,
		Users:        userServiceClient,
		Directory:    directory,
		Intents:      intentPublisher,
		IDs:          ids,
		Clock:        clock,
		Logger:       logger,
		Telemetry:    telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	approveSvc, err := approveapplication.NewService(approveapplication.Dependencies{
		Games:        gameStore,
		Memberships:  membershipStore,
		Applications: applicationStore,
		Directory:    directory,
		GapStore:     gapActivationStore,
		Intents:      intentPublisher,
		IDs:          ids,
		Clock:        clock,
		Logger:       logger,
		Telemetry:    telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	rejectSvc, err := rejectapplication.NewService(rejectapplication.Dependencies{
		Games:        gameStore,
		Applications: applicationStore,
		Directory:    directory,
		Intents:      intentPublisher,
		Clock:        clock,
		Logger:       logger,
		Telemetry:    telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	createInviteSvc, err := createinvite.NewService(createinvite.Dependencies{
		Games:       gameStore,
		Invites:     inviteStore,
		Memberships: membershipStore,
		Intents:     intentPublisher,
		IDs:         ids,
		Clock:       clock,
		Logger:      logger,
		Telemetry:   telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	redeemInviteSvc, err := redeeminvite.NewService(redeeminvite.Dependencies{
		Games:       gameStore,
		Invites:     inviteStore,
		Memberships: membershipStore,
		Directory:   directory,
		Users:       userServiceClient,
		GapStore:    gapActivationStore,
		Intents:     intentPublisher,
		IDs:         ids,
		Clock:       clock,
		Logger:      logger,
		Telemetry:   telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	declineInviteSvc, err := declineinvite.NewService(declineinvite.Dependencies{
		Invites:   inviteStore,
		Clock:     clock,
		Logger:    logger,
		Telemetry: telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	revokeInviteSvc, err := revokeinvite.NewService(revokeinvite.Dependencies{
		Games:     gameStore,
		Invites:   inviteStore,
		Clock:     clock,
		Logger:    logger,
		Telemetry: telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	enrollmentWorker, err := enrollmentautomation.NewWorker(enrollmentautomation.Dependencies{
		Games:       gameStore,
		Memberships: membershipStore,
		Invites:     inviteStore,
		Intents:     intentPublisher,
		GapStore:    gapActivationStore,
		Interval:    cfg.EnrollmentAutomation.Interval,
		Clock:       clock,
		Logger:      logger,
		Telemetry:   telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	// Outbound clients used by the game start/stop flow.
	gmClientImpl, err := gmclient.NewClient(gmclient.Config{
		BaseURL: cfg.GM.BaseURL,
		Timeout: cfg.GM.Timeout,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	runtimePublisher, err := runtimemanager.NewPublisher(runtimemanager.Config{
		Client:          redisClient,
		StartJobsStream: cfg.Redis.RuntimeStartJobsStream,
		StopJobsStream:  cfg.Redis.RuntimeStopJobsStream,
		Clock:           clock,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	streamOffsets, err := redisstate.NewStreamOffsetStore(redisClient)
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	startSvc, err := startgame.NewService(startgame.Dependencies{
		Games:          gameStore,
		RuntimeManager: runtimePublisher,
		Clock:          clock,
		Logger:         logger,
		Telemetry:      telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	retrySvc, err := retrystartgame.NewService(retrystartgame.Dependencies{
		Games:     gameStore,
		Clock:     clock,
		Logger:    logger,
		Telemetry: telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	pauseSvc, err := pausegame.NewService(pausegame.Dependencies{
		Games:     gameStore,
		Clock:     clock,
		Logger:    logger,
		Telemetry: telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	resumeSvc, err := resumegame.NewService(resumegame.Dependencies{
		Games:     gameStore,
		GM:        gmClientImpl,
		Clock:     clock,
		Logger:    logger,
		Telemetry: telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	removeMemberSvc, err := removemember.NewService(removemember.Dependencies{
		Games:       gameStore,
		Memberships: membershipStore,
		Directory:   directory,
		Clock:       clock,
		Logger:      logger,
		Telemetry:   telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	blockMemberSvc, err := blockmember.NewService(blockmember.Dependencies{
		Games:       gameStore,
		Memberships: membershipStore,
		Clock:       clock,
		Logger:      logger,
		Telemetry:   telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	registerRaceNameSvc, err := registerracename.NewService(registerracename.Dependencies{
		Directory: directory,
		Users:     userServiceClient,
		Intents:   intentPublisher,
		Clock:     clock,
		Logger:    logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	listMyRaceNamesSvc, err := listmyracenames.NewService(listmyracenames.Dependencies{
		Directory: directory,
		Games:     gameStore,
		Logger:    logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	// Read-side query services.
	getGameSvc, err := getgame.NewService(getgame.Dependencies{
		Games:       gameStore,
		Memberships: membershipStore,
		Invites:     inviteStore,
		Logger:      logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	listGamesSvc, err := listgames.NewService(listgames.Dependencies{
		Games:       gameStore,
		Memberships: membershipStore,
		Logger:      logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	listMembershipsSvc, err := listmemberships.NewService(listmemberships.Dependencies{
		Games:       gameStore,
		Memberships: membershipStore,
		Logger:      logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	listMyGamesSvc, err := listmygames.NewService(listmygames.Dependencies{
		Games:       gameStore,
		Memberships: membershipStore,
		Logger:      logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	listMyApplicationsSvc, err := listmyapplications.NewService(listmyapplications.Dependencies{
		Games:        gameStore,
		Applications: applicationStore,
		Logger:       logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	listMyInvitesSvc, err := listmyinvites.NewService(listmyinvites.Dependencies{
		Games:       gameStore,
		Invites:     inviteStore,
		Memberships: membershipStore,
		Logger:      logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	// Stream consumers and periodic workers.
	runtimeConsumer, err := runtimejobresult.NewConsumer(runtimejobresult.Config{
		Client:         redisClient,
		Stream:         cfg.Redis.RuntimeJobResultsStream,
		BlockTimeout:   cfg.Redis.RuntimeJobResultsReadBlockTimeout,
		Games:          gameStore,
		RuntimeManager: runtimePublisher,
		GMClient:       gmClientImpl,
		Intents:        intentPublisher,
		OffsetStore:    streamOffsets,
		Clock:          clock,
		Logger:         logger,
		Telemetry:      telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	gameTurnStatsStore, err := redisstate.NewGameTurnStatsStore(redisClient)
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	evaluationGuardStore, err := redisstate.NewEvaluationGuardStore(redisClient)
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	raceNameIntents, err := racenameintents.NewPublisher(racenameintents.Config{
		Publisher: intentPublisher,
		Clock:     clock,
		Logger:    logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	capabilityService, err := capabilityevaluation.NewService(capabilityevaluation.Dependencies{
		Games:       gameStore,
		Memberships: membershipStore,
		Stats:       gameTurnStatsStore,
		Directory:   directory,
		Intents:     raceNameIntents,
		Guard:       evaluationGuardStore,
		Clock:       clock,
		Logger:      logger,
		Telemetry:   telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	gmConsumer, err := gmevents.NewConsumer(gmevents.Config{
		Client:       redisClient,
		Stream:       cfg.Redis.GMEventsStream,
		BlockTimeout: cfg.Redis.GMEventsReadBlockTimeout,
		Games:        gameStore,
		Stats:        gameTurnStatsStore,
		Capability:   capabilityService,
		OffsetStore:  streamOffsets,
		Clock:        clock,
		Logger:       logger,
		Telemetry:    telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	pendingRegistrationWorker, err := pendingregistration.NewWorker(pendingregistration.Dependencies{
		Directory: directory,
		Interval:  cfg.PendingRegistration.Interval,
		Clock:     clock,
		Logger:    logger,
		Telemetry: telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	userLifecycleWorker, err := userlifecycleworker.NewWorker(userlifecycleworker.Dependencies{
		Directory:      directory,
		Memberships:    membershipStore,
		Applications:   applicationStore,
		Invites:        inviteStore,
		Games:          gameStore,
		RuntimeManager: runtimePublisher,
		Intents:        intentPublisher,
		Clock:          clock,
		Logger:         logger,
		Telemetry:      telemetryRuntime,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	userLifecycleConsumer, err := userlifecycle.NewConsumer(userlifecycle.Config{
		Client:       redisClient,
		Stream:       cfg.Redis.UserLifecycleStream,
		BlockTimeout: cfg.Redis.UserLifecycleReadBlockTimeout,
		OffsetStore:  streamOffsets,
		Clock:        clock,
		Logger:       logger,
	})
	if err != nil {
		return nil, fmt.Errorf("new lobby wiring: %w", err)
	}
	// The consumer is the long-lived goroutine owner; the worker only
	// reacts to the events the consumer dispatches.
	userLifecycleConsumer.OnEvent(userLifecycleWorker.Handle)
	return &wiring{
		policy:                   policy,
		raceNameDirectory:        directory,
		gameStore:                gameStore,
		applicationStore:         applicationStore,
		inviteStore:              inviteStore,
		membershipStore:          membershipStore,
		gapActivationStore:       gapActivationStore,
		userService:              userServiceClient,
		intentPublisher:          intentPublisher,
		idGenerator:              ids,
		createGame:               createSvc,
		updateGame:               updateSvc,
		openEnrollment:           openSvc,
		cancelGame:               cancelSvc,
		manualReadyToStart:       manualReadySvc,
		submitApplication:        submitSvc,
		approveApplication:       approveSvc,
		rejectApplication:        rejectSvc,
		createInvite:             createInviteSvc,
		redeemInvite:             redeemInviteSvc,
		declineInvite:            declineInviteSvc,
		revokeInvite:             revokeInviteSvc,
		enrollmentAutomation:     enrollmentWorker,
		runtimeManager:           runtimePublisher,
		gmClient:                 gmClientImpl,
		streamOffsetStore:        streamOffsets,
		startGame:                startSvc,
		retryStartGame:           retrySvc,
		pauseGame:                pauseSvc,
		resumeGame:               resumeSvc,
		removeMember:             removeMemberSvc,
		blockMember:              blockMemberSvc,
		registerRaceName:         registerRaceNameSvc,
		listMyRaceNames:          listMyRaceNamesSvc,
		getGame:                  getGameSvc,
		listGames:                listGamesSvc,
		listMemberships:          listMembershipsSvc,
		listMyGames:              listMyGamesSvc,
		listMyApplications:       listMyApplicationsSvc,
		listMyInvites:            listMyInvitesSvc,
		runtimeJobResultConsumer: runtimeConsumer,
		gameTurnStatsStore:       gameTurnStatsStore,
		evaluationGuardStore:     evaluationGuardStore,
		capabilityEvaluation:     capabilityService,
		gmEventsConsumer:         gmConsumer,
		pendingRegistration:      pendingRegistrationWorker,
		userLifecycleConsumer:    userLifecycleConsumer,
		userLifecycleWorker:      userLifecycleWorker,
	}, nil
}
// buildRaceNameDirectory instantiates the Race Name Directory adapter
// selected by cfg.RaceNameDirectory.Backend. The stub backend needs no
// Redis client; the redis backend requires one.
func buildRaceNameDirectory(
	cfg config.Config,
	redisClient *redis.Client,
	policy *racename.Policy,
	clock func() time.Time,
) (ports.RaceNameDirectory, error) {
	backend := cfg.RaceNameDirectory.Backend
	// In-memory stub: useful for tests and local development.
	if backend == config.RaceNameDirectoryBackendStub {
		return racenamestub.NewDirectory(racenamestub.WithClock(clock))
	}
	if backend != config.RaceNameDirectoryBackendRedis {
		return nil, fmt.Errorf("unsupported race name directory backend %q", backend)
	}
	// Redis-backed directory: a client is mandatory.
	if redisClient == nil {
		return nil, errors.New("redis race name directory backend requires a Redis client")
	}
	return redisstate.NewRaceNameDirectory(
		redisClient,
		policy,
		redisstate.WithRaceNameDirectoryClock(clock),
	)
}