feat: game lobby service
@@ -0,0 +1,280 @@
package app

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"time"

	"galaxy/lobby/internal/adapters/redisstate"
	"galaxy/lobby/internal/api/internalhttp"
	"galaxy/lobby/internal/api/publichttp"
	"galaxy/lobby/internal/config"
	"galaxy/lobby/internal/domain/game"
	"galaxy/lobby/internal/ports"
	"galaxy/lobby/internal/telemetry"
)

// activeGamesProbe adapts ports.GameStore to telemetry.ActiveGamesProbe by
// converting domain status keys into the string-typed map the telemetry
// runtime consumes.
type activeGamesProbe struct {
	games ports.GameStore
}

func (probe activeGamesProbe) CountByStatus(ctx context.Context) (map[string]int, error) {
	counts, err := probe.games.CountByStatus(ctx)
	if err != nil {
		return nil, err
	}
	out := make(map[string]int, len(counts))
	for status, count := range counts {
		out[string(status)] = count
	}
	return out, nil
}

var _ telemetry.ActiveGamesProbe = activeGamesProbe{}
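
// A minimal usage sketch (illustrative only; `store` is a stand-in for any
// concrete ports.GameStore, which this adapter touches solely through the
// CountByStatus call above):
//
//	probe := activeGamesProbe{games: store}
//	counts, err := probe.CountByStatus(ctx)
//	if err != nil {
//		// handle the store error
//	}
//	_ = counts // keyed by the string form of each game.Status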

// Keep a compile-time reference to game.AllStatuses so this file fails to
// build if the frozen game.Status vocabulary is removed or renamed. This is
// a reminder, not a true key-set assertion: it helps surface drift when a
// new status is introduced without updating telemetry attribute
// documentation.
var _ = game.AllStatuses
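
// If a hard guard is ever wanted, one option is an init-time length check
// (a sketch only; expectedStatusCount is hypothetical and would have to
// track the real frozen vocabulary):
//
//	func init() {
//		const expectedStatusCount = 7 // hypothetical
//		if len(game.AllStatuses) != expectedStatusCount {
//			panic("game.Status vocabulary changed; update telemetry attribute docs")
//		}
//	}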

// Runtime owns the runnable Game Lobby Service process plus the cleanup
// functions that release runtime resources after shutdown.
type Runtime struct {
	cfg config.Config

	app *App

	wiring *wiring

	publicServer   *publichttp.Server
	internalServer *internalhttp.Server

	cleanupFns []func() error
}

// NewRuntime constructs the runnable Game Lobby Service process from cfg.
func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*Runtime, error) {
	if ctx == nil {
		return nil, errors.New("new lobby runtime: nil context")
	}
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("new lobby runtime: %w", err)
	}
	if logger == nil {
		logger = slog.Default()
	}

	runtime := &Runtime{
		cfg: cfg,
	}

	cleanupOnError := func(err error) (*Runtime, error) {
		if cleanupErr := runtime.Close(); cleanupErr != nil {
			return nil, fmt.Errorf("%w; cleanup: %w", err, cleanupErr)
		}

		return nil, err
	}

	telemetryRuntime, err := telemetry.NewProcess(ctx, telemetry.ProcessConfig{
		ServiceName:          cfg.Telemetry.ServiceName,
		TracesExporter:       cfg.Telemetry.TracesExporter,
		MetricsExporter:      cfg.Telemetry.MetricsExporter,
		TracesProtocol:       cfg.Telemetry.TracesProtocol,
		MetricsProtocol:      cfg.Telemetry.MetricsProtocol,
		StdoutTracesEnabled:  cfg.Telemetry.StdoutTracesEnabled,
		StdoutMetricsEnabled: cfg.Telemetry.StdoutMetricsEnabled,
	}, logger)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: telemetry: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
		defer cancel()
		return telemetryRuntime.Shutdown(shutdownCtx)
	})

	redisClient := newRedisClient(cfg.Redis)
	if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: %w", err))
	}
	runtime.cleanupFns = append(runtime.cleanupFns, func() error {
		return redisClient.Close()
	})
	if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: %w", err))
	}

	wiring, err := newWiring(cfg, redisClient, time.Now, logger, telemetryRuntime)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: wiring: %w", err))
	}
	runtime.wiring = wiring

	streamLagProbe, err := redisstate.NewStreamLagProbe(redisClient, time.Now)
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: stream lag probe: %w", err))
	}
	if err := telemetryRuntime.RegisterGauges(telemetry.GaugeDependencies{
		ActiveGames: activeGamesProbe{games: wiring.gameStore},
		StreamLag:   streamLagProbe,
		Offsets:     wiring.streamOffsetStore,
		GMEvents: telemetry.StreamGaugeBinding{
			OffsetLabel: "gm_lobby_events",
			StreamName:  cfg.Redis.GMEventsStream,
		},
		RuntimeResults: telemetry.StreamGaugeBinding{
			OffsetLabel: "runtime_results",
			StreamName:  cfg.Redis.RuntimeJobResultsStream,
		},
		UserLifecycle: telemetry.StreamGaugeBinding{
			OffsetLabel: "user_lifecycle",
			StreamName:  cfg.Redis.UserLifecycleStream,
		},
		Logger: logger,
	}); err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: register gauges: %w", err))
	}

	publicServer, err := publichttp.NewServer(publichttp.Config{
		Addr:              cfg.PublicHTTP.Addr,
		ReadHeaderTimeout: cfg.PublicHTTP.ReadHeaderTimeout,
		ReadTimeout:       cfg.PublicHTTP.ReadTimeout,
		IdleTimeout:       cfg.PublicHTTP.IdleTimeout,
	}, publichttp.Dependencies{
		Logger:             logger,
		Telemetry:          telemetryRuntime,
		CreateGame:         wiring.createGame,
		UpdateGame:         wiring.updateGame,
		OpenEnrollment:     wiring.openEnrollment,
		CancelGame:         wiring.cancelGame,
		ManualReadyToStart: wiring.manualReadyToStart,
		StartGame:          wiring.startGame,
		RetryStartGame:     wiring.retryStartGame,
		PauseGame:          wiring.pauseGame,
		ResumeGame:         wiring.resumeGame,
		SubmitApplication:  wiring.submitApplication,
		ApproveApplication: wiring.approveApplication,
		RejectApplication:  wiring.rejectApplication,
		CreateInvite:       wiring.createInvite,
		RedeemInvite:       wiring.redeemInvite,
		DeclineInvite:      wiring.declineInvite,
		RevokeInvite:       wiring.revokeInvite,
		RemoveMember:       wiring.removeMember,
		BlockMember:        wiring.blockMember,
		RegisterRaceName:   wiring.registerRaceName,
		ListMyRaceNames:    wiring.listMyRaceNames,
		GetGame:            wiring.getGame,
		ListGames:          wiring.listGames,
		ListMemberships:    wiring.listMemberships,
		ListMyGames:        wiring.listMyGames,
		ListMyApplications: wiring.listMyApplications,
		ListMyInvites:      wiring.listMyInvites,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: public HTTP server: %w", err))
	}
	runtime.publicServer = publicServer

	internalServer, err := internalhttp.NewServer(internalhttp.Config{
		Addr:              cfg.InternalHTTP.Addr,
		ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout,
		ReadTimeout:       cfg.InternalHTTP.ReadTimeout,
		IdleTimeout:       cfg.InternalHTTP.IdleTimeout,
	}, internalhttp.Dependencies{
		Logger:             logger,
		Telemetry:          telemetryRuntime,
		CreateGame:         wiring.createGame,
		UpdateGame:         wiring.updateGame,
		OpenEnrollment:     wiring.openEnrollment,
		CancelGame:         wiring.cancelGame,
		ManualReadyToStart: wiring.manualReadyToStart,
		StartGame:          wiring.startGame,
		RetryStartGame:     wiring.retryStartGame,
		PauseGame:          wiring.pauseGame,
		ResumeGame:         wiring.resumeGame,
		ApproveApplication: wiring.approveApplication,
		RejectApplication:  wiring.rejectApplication,
		RemoveMember:       wiring.removeMember,
		BlockMember:        wiring.blockMember,
		GetGame:            wiring.getGame,
		ListGames:          wiring.listGames,
		ListMemberships:    wiring.listMemberships,
	})
	if err != nil {
		return cleanupOnError(fmt.Errorf("new lobby runtime: internal HTTP server: %w", err))
	}
	runtime.internalServer = internalServer

	runtime.app = New(
		cfg,
		publicServer,
		internalServer,
		wiring.enrollmentAutomation,
		wiring.runtimeJobResultConsumer,
		wiring.gmEventsConsumer,
		wiring.pendingRegistration,
		wiring.userLifecycleConsumer,
	)

	return runtime, nil
}
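
// Design note: each dependency appends its cleanup immediately after it is
// constructed, so cleanupOnError above (and Close below) tear down exactly
// the resources that exist at the point of failure, in reverse order.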

// PublicServer returns the public HTTP server owned by runtime. It is
// primarily exposed for tests; production code should not depend on it.
func (runtime *Runtime) PublicServer() *publichttp.Server {
	if runtime == nil {
		return nil
	}

	return runtime.publicServer
}

// InternalServer returns the internal HTTP server owned by runtime. It is
// primarily exposed for tests; production code should not depend on it.
func (runtime *Runtime) InternalServer() *internalhttp.Server {
	if runtime == nil {
		return nil
	}

	return runtime.internalServer
}

// Run serves the public and internal HTTP listeners until ctx is canceled or
// one component fails.
func (runtime *Runtime) Run(ctx context.Context) error {
	if runtime == nil {
		return errors.New("run lobby runtime: nil runtime")
	}
	if ctx == nil {
		return errors.New("run lobby runtime: nil context")
	}
	if runtime.app == nil {
		return errors.New("run lobby runtime: nil app")
	}

	return runtime.app.Run(ctx)
}

// Close releases every runtime dependency in reverse construction order.
// Close is safe to call multiple times.
func (runtime *Runtime) Close() error {
	if runtime == nil {
		return nil
	}

	var joined error
	for index := len(runtime.cleanupFns) - 1; index >= 0; index-- {
		if err := runtime.cleanupFns[index](); err != nil {
			joined = errors.Join(joined, err)
		}
	}
	runtime.cleanupFns = nil

	return joined
}
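
// A minimal lifecycle sketch (illustrative only; config loading, logging,
// and signal handling are assumptions about the caller, not part of this
// package):
//
//	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
//	defer stop()
//
//	runtime, err := app.NewRuntime(ctx, cfg, logger)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer runtime.Close()
//
//	if err := runtime.Run(ctx); err != nil {
//		log.Fatal(err)
//	}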